diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -109,6 +109,14 @@
     : RVVSignedBinBuiltinSet,
       RVVUnsignedBinBuiltinSet;
+multiclass RVVInt64BinBuiltinSet
+    : RVVOutOp1BuiltinSet,
+      RVVOutOp1BuiltinSet;
+
 multiclass RVVSlideOneBuiltinSet : RVVOutOp1BuiltinSet {
+  if HasVV then {
+    defvar suffix = !if(!or(HasVS, !eq(NAME, "vsm4r")), "vv", "v");
+    // We don't need a suffix in the Zvkb extension since it's considered a
+    // normal arithmetic operation such as vadd. So we apply the same rule to
+    // this extension and re-use the APIs that are already defined.
+    defvar name = NAME # !if(IsZvkb, "", "_" # suffix);
+    let OverloadedName = name in
+    defm "" : RVVOutBuiltinSet;
+  }
+
+  if HasVS then {
+    // vaesz only has 'vs' and does not have ambiguous prototypes like other
+    // zvkned instructions (e.g. vaesdf), so we don't need to encode the operand
+    // mnemonics into its intrinsic function name.
+    defvar suffix = !if(!eq(NAME, "vgmul"), "vv", "vs");
+    defvar name = NAME # !if(!or(IsZvkb, !or(!eq(NAME, "vaesz"),
+                                             !eq(NAME, "vgmul"))),
+                             "", "_" # suffix);
+    let OverloadedName = name in
+    defm "" : RVVOutOp1BuiltinSet;
+  }
+}
+
+multiclass RVVOutOp1BuiltinSetP {
+  defvar suffix = !if(IsVV, "vv", "vi");
+  defvar prototype = !if(IsVV, "UvUvUvUv", "UvUvUvUe");
+  defm "" : RVVBuiltinSet;
+}
+
+let UnMaskedPolicyScheme = HasPassthruOperand in {
+  // zvbb
+  defm vandn : RVVIntBinBuiltinSet;
+  defm vbrev : RVVOutBuiltinSetP<1, 0, 1, "csil", 1>;
+  defm vbrev8 : RVVOutBuiltinSetP<1, 0, 1, "csil", 1>;
+  defm vrev8 : RVVOutBuiltinSetP<1, 0, 1, "csil", 1>;
+  defm vclz : RVVOutBuiltinSetP<1, 0, 1, "csil", 1>;
+  defm vctz : RVVOutBuiltinSetP<1, 0, 1, "csil", 1>;
+  defm vcpopv : RVVOutBuiltinSetP<1, 0, 1, "csil", 1>;
+  defm vrol : RVVIntBinBuiltinSet;
+  defm vror : RVVIntBinBuiltinSet;
+  defm vwsll : RVVUnsignedWidenBinBuiltinSet, RVVSignedWidenBinBuiltinSet;
+
+  // zvbc
+  defm vclmul : RVVInt64BinBuiltinSet;
+  defm vclmulh : RVVInt64BinBuiltinSet;
+}
+
+let UnMaskedPolicyScheme = HasPolicyOperand, HasMasked = false in {
+  // zvkg
+  defm vghsh : RVVOutOp1BuiltinSetP;
+  defm vgmul : RVVOutBuiltinSetP<0>;
+
+  // zvkned
+  defm vaesdf : RVVOutBuiltinSetP;
+  defm vaesdm : RVVOutBuiltinSetP;
+  defm vaesef : RVVOutBuiltinSetP;
+  defm vaesem : RVVOutBuiltinSetP;
+  let UnMaskedPolicyScheme = HasPassthruOperand in
+  defm vaeskf1 : RVVOutOp1BuiltinSet<"vaeskf1", "i", [["vi", "Uv", "UvUvUe"]]>;
+  defm vaeskf2 : RVVOutOp1BuiltinSetP<0>;
+  defm vaesz : RVVOutBuiltinSetP<0>;
+
+  // zvknha or zvknhb
+  defm vsha2ch : RVVOutOp1BuiltinSetP<1, "il">;
+  defm vsha2cl : RVVOutOp1BuiltinSetP<1, "il">;
+  defm vsha2ms : RVVOutOp1BuiltinSetP<1, "il">;
+
+  // zvksed
+  let UnMaskedPolicyScheme = HasPassthruOperand in
+  defm vsm4k : RVVOutOp1BuiltinSet<"vsm4k", "i", [["vi", "Uv", "UvUvUe"]]>;
+  defm vsm4r : RVVOutBuiltinSetP;
+
+  // zvksh
+  defm vsm3c : RVVOutOp1BuiltinSetP<0>;
+  let UnMaskedPolicyScheme = HasPassthruOperand in
+  defm vsm3me : RVVOutOp1BuiltinSet<"vsm3me", "i", [["vv", "Uv", "UvUvUv"]]>;
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesdf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesdf.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesdf.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by
utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesdf_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32mf2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m1(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m4(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m8(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], 
i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesdm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesdm.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesdm.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesdm_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32mf2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m1(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m4(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t 
vl) { + return __riscv_vaesdm_vs_u32m4(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m8(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesef.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesef.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesef.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesef_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32mf2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m1(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m4( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m4(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m8(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesem.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesem.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesem_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32mf2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m1(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// 
CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m4(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m8(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaeskf1.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaeskf1.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaeskf1.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaeskf1_vi_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32mf2(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m1(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m2(vs2, 
0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m4(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m8(vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaeskf2.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaeskf2.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaeskf2.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaeskf2_vi_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv1i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32mf2(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv2i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m1(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv4i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m2(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv8i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m4(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv16i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m8(vd, vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vaesz.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have 
been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesz_vs_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vandn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vandn.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vandn.c @@ -0,0 +1,1590 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vandn_vv_i8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vv_i8mf4(vint8mf4_t 
vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m8( +// CHECK-NEXT: 
entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_i16mf4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16mf4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i16mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vandn.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i32mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 
[[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t 
test_vandn_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2(vs2, 
rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m2( 
+// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m2( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vandn.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// 
+vint8mf2_t test_vandn_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vv_i8m1_m(vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vx_i8m1_m(vbool8_t mask, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vv_i8m2_m(vbool4_t mask, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vx_i8m2_m(vbool4_t mask, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vv_i8m4_m(vbool2_t mask, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vx_i8m4_m(vbool2_t mask, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vv_i8m8_m(vbool1_t mask, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vx_i8m8_m(vbool1_t mask, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// 
+vint16mf4_t test_vandn_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_i16mf4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16mf4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i16mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vv_i16m1_m(vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vx_i16m1_m(vbool16_t mask, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vv_i16m2_m(vbool8_t mask, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vx_i16m2_m(vbool8_t mask, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vv_i16m4_m(vbool4_t mask, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 
[[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vx_i16m4_m(vbool4_t mask, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vv_i16m8_m(vbool2_t mask, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vx_i16m8_m(vbool2_t mask, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i32mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vv_i32m1_m(vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vx_i32m1_m(vbool32_t mask, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vv_i32m2_m(vbool16_t mask, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vx_i32m2_m(vbool16_t mask, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vv_i32m4_m(vbool8_t mask, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vx_i32m4_m(vbool8_t mask, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vv_i32m8_m(vbool4_t mask, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vx_i32m8_m(vbool4_t mask, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vv_i64m1_m(vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vx_i64m1_m(vbool64_t mask, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vv_i64m2_m(vbool32_t mask, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vx_i64m2_m(vbool32_t mask, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vv_i64m4_m(vbool16_t mask, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: 
@test_vandn_vx_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vx_i64m4_m(vbool16_t mask, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vv_i64m8_m(vbool8_t mask, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vx_i64m8_m(vbool8_t mask, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return 
__riscv_vandn_vx_u8mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t 
vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// 
CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_m(mask, vs2, vs1, vl); +} + +// 
CHECK-LABEL: @test_vandn_vx_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vbrev.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vbrev.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vbrev.c @@ -0,0 +1,402 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vbrev_v_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv16i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv32i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t 
test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv64i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv16i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv32i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv16i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return 
__riscv_vbrev_v_u8m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv64i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t 
test_vbrev_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_m(mask, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vbrev8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vbrev8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vbrev8.c @@ -0,0 +1,402 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vbrev8_v_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf2( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv16i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv32i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv64i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv16i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv32i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] 
+// +vuint32mf2_t test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv16i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv64i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i16.i64( poison, [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_m(mask, vs2, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vclmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vclmul.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vclmul.c @@ -0,0 +1,150 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vclmul_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vclmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vclmulh.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vclmulh.c @@ -0,0 +1,150 @@ +// NOTE: Assertions have been autogenerated 
by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vclmulh_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m1_m( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vclz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vclz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vclz.c @@ -0,0 +1,402 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vclz_v_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vclz.nxv1i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv16i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv32i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv64i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv16i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m8( 
+// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv32i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv16i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret 
[[TMP0]] +// +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv64i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] 
+// +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret 
[[TMP0]] +// +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_m(mask, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcpopv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcpopv.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcpopv.c @@ -0,0 +1,402 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vcpopv_v_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcpopv_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf8(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcpopv_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf4(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcpopv_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcpopv_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m1(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv16i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcpopv_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m2(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv32i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcpopv_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m4(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv64i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcpopv_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m8(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcpopv_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16mf4(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcpopv_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16mf2(vs2, vl); +} + +// CHECK-LABEL: 
@test_vcpopv_v_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcpopv_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m1(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcpopv_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m2(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv16i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcpopv_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m4(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv32i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcpopv_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m8(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcpopv_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcpopv_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m1(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcpopv_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m2(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcpopv_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m4(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv16i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcpopv_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m8(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcpopv_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m1(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcpopv_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m2(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i64.i64( poison, [[VS2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcpopv_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m4(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcpopv_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m8(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcpopv_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcpopv_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcpopv_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcpopv_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcpopv_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcpopv_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv64i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcpopv_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcpopv_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16mf4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) 
+// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcpopv_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcpopv_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcpopv_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcpopv_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcpopv_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcpopv_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcpopv_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcpopv_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcpopv_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcpopv_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vcpopv.mask.nxv1i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcpopv_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcpopv_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcpopv_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcpopv_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m8_m(mask, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vctz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vctz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vctz.c @@ -0,0 +1,402 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vctz_v_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv16i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vctz.nxv32i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv64i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv16i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv32i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4(vs2, vl); +} + +// CHECK-LABEL: 
@test_vctz_v_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv16i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) 
{ + return __riscv_vctz_v_u8m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv64i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t mask, vuint32m2_t 
vs2, size_t vl) { + return __riscv_vctz_v_u32m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_m(mask, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vghsh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vghsh.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vghsh.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vghsh_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vghsh_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return 
__riscv_vghsh_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vghsh_vv_u32m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vghsh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vghsh_vv_u32m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vghsh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vghsh_vv_u32m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vghsh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vghsh_vv_u32m8(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vgmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vgmul.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vgmul.c
@@ -0,0 +1,51 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s
+
+#include <riscv_vector.h>
+
+// CHECK-LABEL: @test_vgmul_vv_u32mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vgmul.vv.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32mf2(vd, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vgmul_vv_u32m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vgmul.vv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m1(vd, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vgmul_vv_u32m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vgmul.vv.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m2(vd, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vgmul_vv_u32m4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vgmul.vv.nxv8i32.nxv2i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m4(vd, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vgmul_vv_u32m8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vgmul.vv.nxv16i32.nxv2i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vgmul_vv_u32m8(vd, vs2,
vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrev8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrev8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrev8.c @@ -0,0 +1,402 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vrev8_v_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv16i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv32i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv64i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv16i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv32i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv16i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m8( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv64i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vrev8.mask.nxv4i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m2_m( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_m(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_m(mask, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrol.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrol.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrol.c @@ -0,0 +1,1590 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vrol_vv_i8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] 
+// +vint8mf2_t test_vrol_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_i16mf4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16mf4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf2( 
+// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i16mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.nxv1i32.i64( 
poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i32mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t 
test_vrol_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4(vs2, vs1, vl); +} + +// 
CHECK-LABEL: @test_vrol_vx_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 
[[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { 
+ return __riscv_vrol_vx_u32m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8(vs2, rs1, vl); +} + +// CHECK-LABEL: 
@test_vrol_vv_i8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vv_i8m1_m(vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vx_i8m1_m(vbool8_t mask, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vv_i8m2_m(vbool4_t mask, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: 
@test_vrol_vx_i8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vx_i8m2_m(vbool4_t mask, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vv_i8m4_m(vbool2_t mask, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vx_i8m4_m(vbool2_t mask, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vv_i8m8_m(vbool1_t mask, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vx_i8m8_m(vbool1_t mask, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_i16mf4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16mf4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i16mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16mf2_m(mask, vs2, rs1, vl); +} + +// 
CHECK-LABEL: @test_vrol_vv_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vv_i16m1_m(vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vx_i16m1_m(vbool16_t mask, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vv_i16m2_m(vbool8_t mask, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vx_i16m2_m(vbool8_t mask, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vv_i16m4_m(vbool4_t mask, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vx_i16m4_m(vbool4_t mask, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vv_i16m8_m(vbool2_t mask, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vx_i16m8_m(vbool2_t mask, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return 
__riscv_vrol_vv_i32mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vv_i32m1_m(vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vx_i32m1_m(vbool32_t mask, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vv_i32m2_m(vbool16_t mask, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vx_i32m2_m(vbool16_t mask, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vv_i32m4_m(vbool8_t mask, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vx_i32m4_m(vbool8_t mask, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vv_i32m8_m(vbool4_t mask, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vx_i32m8_m(vbool4_t mask, vint32m8_t vs2, int32_t 
rs1, size_t vl) { + return __riscv_vrol_vx_i32m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vv_i64m1_m(vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vx_i64m1_m(vbool64_t mask, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vv_i64m2_m(vbool32_t mask, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vx_i64m2_m(vbool32_t mask, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vv_i64m4_m(vbool16_t mask, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vx_i64m4_m(vbool16_t mask, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vv_i64m8_m(vbool8_t mask, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vx_i64m8_m(vbool8_t mask, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t mask, 
vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t 
mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vror.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vror.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vror.c @@ -0,0 +1,1590 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: 
riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vror_vv_i8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_i8m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_i8m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m2(vs2, rs1, vl); 
+} + +// CHECK-LABEL: @test_vror_vv_i8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_i8m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_i8m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_i16mf4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16mf4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i16mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_i16m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vror.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_i16m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_i16m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_i16m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i32mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_i32m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// 
CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_i32m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_i32m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_i32m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_i64m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_i64m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return 
__riscv_vror_vv_i64m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_i64m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// 
CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vror_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return 
__riscv_vror_vx_u64m1(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// 
+vint8mf4_t test_vror_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vv_i8m1_m(vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_i8m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vx_i8m1_m(vbool8_t mask, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vv_i8m2_m(vbool4_t mask, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_i8m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vx_i8m2_m(vbool4_t mask, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vv_i8m4_m(vbool2_t mask, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_i8m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vx_i8m4_m(vbool2_t mask, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t 
test_vror_vv_i8m8_m(vbool1_t mask, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_i8m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vx_i8m8_m(vbool1_t mask, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_i16mf4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16mf4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i16mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vv_i16m1_m(vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_i16m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vx_i16m1_m(vbool16_t mask, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vv_i16m2_m(vbool8_t mask, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_i16m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// 
CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vx_i16m2_m(vbool8_t mask, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vv_i16m4_m(vbool4_t mask, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_i16m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vx_i16m4_m(vbool4_t mask, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vv_i16m8_m(vbool2_t mask, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_i16m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vx_i16m8_m(vbool2_t mask, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i32mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vv_i32m1_m(vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_i32m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vx_i32m1_m(vbool32_t mask, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vv_i32m2_m(vbool16_t mask, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_i32m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vx_i32m2_m(vbool16_t mask, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vv_i32m4_m(vbool8_t mask, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_i32m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vx_i32m4_m(vbool8_t mask, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vv_i32m8_m(vbool4_t mask, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_i32m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vx_i32m8_m(vbool4_t mask, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vv_i64m1_m(vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_i64m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vx_i64m1_m(vbool64_t mask, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vv_i64m2_m(vbool32_t mask, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_i64m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( poison, 
[[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vx_i64m2_m(vbool32_t mask, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vv_i64m4_m(vbool16_t mask, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_i64m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vx_i64m4_m(vbool16_t mask, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vv_i64m8_m(vbool8_t mask, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_i64m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vx_i64m8_m(vbool8_t mask, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vror.mask.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: 
@test_vror_vv_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return 
__riscv_vror_vv_u32m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t mask, 
vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_m(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_m(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_m(mask, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ch.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ch.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ch.c @@ -0,0 +1,87 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsha2ch_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u32m4( +// CHECK-NEXT: entry: 
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m8(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8(vd, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2cl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2cl.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2cl.c @@ -0,0 +1,87 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsha2cl_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m2(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m8(vd, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ms.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ms.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsha2ms.c @@ -0,0 +1,87 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsha2ms_vv_u32mf2( +// CHECK-NEXT: entry: 
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m1(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m2(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m4(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m8(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m1(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m2(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m4(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m8(vd, vs2, vs1, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm3c.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm3c.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm3c.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm3c_vi_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv1i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv2i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m1(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv4i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m2(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv8i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m4(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv16i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m8(vd, vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm3me.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm3me.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm3me.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm3me_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32mf2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv2i32.nxv2i32.i64( poison, 
[[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8(vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm4k.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm4k.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm4k.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm4k_vi_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32mf2(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m1(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m2(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m4(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m8(vs2, 
0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm4r.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm4r.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsm4r.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm4r_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32mf2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m1(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m4(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv16i32.i64( [[VD:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m8(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsll.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwsll.c @@ -0,0 +1,1085 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vwsll_vv_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_i16mf4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16mf4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_i16mf2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16mf2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m1(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m1(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vv_i16m2(vint8m1_t 
op1, vint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m8(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m8(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_i32mf2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32mf2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m1(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m1(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vv_i32m2(vint16m1_t 
op1, vint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m8(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m8(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m1(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m1(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t 
test_vwsll_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m8(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m8(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_i16mf4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16mf4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_i16mf2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16mf2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m1_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, 
int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m1_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m8_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m8_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_i32mf2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32mf2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m1_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m1_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m8_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m8_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m1_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m1_m( +// CHECK-NEXT: entry: 
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m1_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m8_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m8_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_u16mf4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16mf4(op1, op2, vl); +} + +// CHECK-LABEL: 
@test_vwsll_vv_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_u16mf2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16mf2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m1(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m1(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m8(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m8(op1, op2, vl); +} + +// 
CHECK-LABEL: @test_vwsll_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_u32mf2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32mf2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m1(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m1(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m8(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { + 
return __riscv_vwsll_vx_u32m8(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m1(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m1(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m2(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m4(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m8(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m8(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_u16mf4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16mf4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_u16mf2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16mf2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m1_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m1_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m8_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m8_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_u32mf2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32mf2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m1_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m1_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, 
vuint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m8_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m8_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m1_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m1_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m2_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( poison, 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m4_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m8_m(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m8_m(mask, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesdf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesdf.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesdf.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesdf_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesdm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesdm.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesdm.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesdm_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); 
+} + +// CHECK-LABEL: @test_vaesdm_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesef.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesef.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesef.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesef_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// 
CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesem.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesem.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesem_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +// CHECK-LABEL: 
@test_vaesem_vs_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaeskf1.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaeskf1.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaeskf1.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: 
%clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaeskf1_vi_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaeskf2.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaeskf2.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaeskf2.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaeskf2_vi_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv1i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv2i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv4i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +// CHECK-LABEL: 
@test_vaeskf2_vi_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv8i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv16i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaesz.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesz_vs_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vandn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vandn.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vandn.c @@ -0,0 +1,1590 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s 
-o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vandn_vv_i8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.nxv32i8.i64( poison, 
[[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return 
__riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vandn.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vx_i64m4(vint64m4_t 
vs2, int64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vandn.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t 
test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// 
CHECK-LABEL: @test_vandn_vx_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.i64.i64( poison, 
[[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf2_m( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vv_i8m1_m(vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vx_i8m1_m(vbool8_t mask, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vv_i8m2_m(vbool4_t mask, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vx_i8m2_m(vbool4_t mask, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vv_i8m4_m(vbool2_t mask, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vx_i8m4_m(vbool2_t mask, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vv_i8m8_m(vbool1_t mask, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vx_i8m8_m(vbool1_t mask, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( 
poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vv_i16m1_m(vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vx_i16m1_m(vbool16_t mask, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vv_i16m2_m(vbool8_t mask, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vx_i16m2_m(vbool8_t mask, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vv_i16m4_m(vbool4_t mask, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 
[[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vx_i16m4_m(vbool4_t mask, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vv_i16m8_m(vbool2_t mask, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vx_i16m8_m(vbool2_t mask, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vv_i32m1_m(vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vx_i32m1_m(vbool32_t mask, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vv_i32m2_m(vbool16_t mask, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vx_i32m2_m(vbool16_t mask, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vv_i32m4_m(vbool8_t mask, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vx_i32m4_m(vbool8_t mask, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vv_i32m8_m(vbool4_t mask, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vx_i32m8_m(vbool4_t mask, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vv_i64m1_m(vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vx_i64m1_m(vbool64_t mask, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vv_i64m2_m(vbool32_t mask, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vx_i64m2_m(vbool32_t mask, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vv_i64m4_m(vbool16_t mask, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret 
[[TMP0]] +// +vint64m4_t test_vandn_vx_i64m4_m(vbool16_t mask, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vv_i64m8_m(vbool8_t mask, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vx_i64m8_m(vbool8_t mask, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t 
test_vandn_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, 
uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, 
vuint16m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, 
uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t 
vs1, size_t vl) { + return __riscv_vandn(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(mask, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vbrev.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vbrev.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vbrev.c @@ -0,0 +1,402 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vbrev_v_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv16i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv32i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv64i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i16.i64( poison, 
[[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv16i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv32i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv16i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vbrev.nxv4i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv64i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t 
mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return 
__riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(mask, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vbrev8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vbrev8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vbrev8.c @@ -0,0 +1,402 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vbrev8_v_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv16i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv32i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv64i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv16i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv32i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv16i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); 
+} + +// CHECK-LABEL: @test_vbrev8_v_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv64i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(mask, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vclmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vclmul.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vclmul.c @@ -0,0 +1,150 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vclmul_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +// CHECK-LABEL: 
@test_vclmul_vx_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(mask, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vclmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vclmulh.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vclmulh.c @@ -0,0 +1,150 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vclmulh_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] 
+// +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return 
__riscv_vclmulh(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(mask, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vclz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vclz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vclz.c @@ -0,0 +1,402 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vclz_v_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv16i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv32i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv64i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv16i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv32i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv16i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vclz.nxv2i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv64i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// 
CHECK-LABEL: @test_vclz_v_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vclz.mask.nxv1i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(mask, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcpopv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcpopv.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcpopv.c @@ -0,0 +1,402 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vcpopv_v_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcpopv_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcpopv_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcpopv_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcpopv_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv16i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcpopv_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv32i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcpopv_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv64i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcpopv_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcpopv_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcpopv_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcpopv_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcpopv_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv16i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcpopv_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv32i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcpopv_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcpopv_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcpopv_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcpopv_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcpopv_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vcpopv.nxv16i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcpopv_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcpopv_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcpopv_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcpopv_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcpopv_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpopv(vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcpopv_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcpopv_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcpopv_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcpopv_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcpopv_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcpopv_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: 
@test_vcpopv_v_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv64i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcpopv_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcpopv_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcpopv_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcpopv_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcpopv_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcpopv_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcpopv_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcpopv_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcpopv_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcpopv_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: 
@test_vcpopv_v_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcpopv_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcpopv_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcpopv_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcpopv_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcpopv_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcpopv_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpopv(mask, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vctz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vctz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vctz.c @@ -0,0 +1,402 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vctz_v_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return 
__riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv16i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv32i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv64i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv16i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv32i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + 
+// CHECK-LABEL: @test_vctz_v_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv16i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vctz.mask.nxv16i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv64i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// 
CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(mask, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vghsh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vghsh.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vghsh.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vghsh_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +// 
CHECK-LABEL: @test_vghsh_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vghsh_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vghsh_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vghsh_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vgmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vgmul.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vgmul.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vgmul_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vgmul_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vgmul_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vgmul_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vgmul_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vgmul.vv.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrev8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrev8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrev8.c @@ -0,0 +1,402 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vrev8_v_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv16i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv32i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv64i8.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m1( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv16i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv32i16.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv16i32.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// 
CHECK-LABEL: @test_vrev8_v_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i64.i64( poison, [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv64i8.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrev8.mask.nxv4i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i16.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i32.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i64.i64( poison, 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i64.i64( poison, [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(mask, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrol.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrol.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrol.c @@ -0,0 +1,1590 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vrol_vv_i8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m1( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return 
__riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 
[[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: 
@test_vrol_vv_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrol.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t 
test_vrol_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m1( +// CHECK-NEXT: entry: 
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( poison, 
[[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vv_i8m1_m(vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vx_i8m1_m(vbool8_t mask, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vv_i8m2_m(vbool4_t mask, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vx_i8m2_m(vbool4_t mask, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vv_i8m4_m(vbool2_t mask, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t 
test_vrol_vx_i8m4_m(vbool2_t mask, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vv_i8m8_m(vbool1_t mask, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vx_i8m8_m(vbool1_t mask, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vv_i16m1_m(vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vx_i16m1_m(vbool16_t mask, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vv_i16m2_m(vbool8_t mask, vint16m2_t vs2, vint16m2_t vs1, size_t 
vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vx_i16m2_m(vbool8_t mask, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vv_i16m4_m(vbool4_t mask, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vx_i16m4_m(vbool4_t mask, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vv_i16m8_m(vbool2_t mask, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vx_i16m8_m(vbool2_t mask, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vv_i32m1_m(vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vx_i32m1_m(vbool32_t mask, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// 
CHECK-LABEL: @test_vrol_vv_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vv_i32m2_m(vbool16_t mask, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vx_i32m2_m(vbool16_t mask, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vv_i32m4_m(vbool8_t mask, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vx_i32m4_m(vbool8_t mask, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vv_i32m8_m(vbool4_t mask, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vx_i32m8_m(vbool4_t mask, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vv_i64m1_m(vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vx_i64m1_m(vbool64_t mask, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vv_i64m2_m(vbool32_t mask, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vx_i64m2_m(vbool32_t mask, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vv_i64m4_m(vbool16_t mask, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vx_i64m4_m(vbool16_t mask, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vv_i64m8_m(vbool8_t mask, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vx_i64m8_m(vbool8_t mask, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( poison, 
[[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t mask, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t 
test_vrol_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t mask, 
vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + 
  return __riscv_vrol(mask, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vrol_vx_u64m4_m(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrol(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_vrol_vv_u64m8_m(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrol(mask, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vrol_vx_u64m8_m(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrol(mask, vs2, rs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vror.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vror.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vror.c
@@ -0,0 +1,1590 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s
+
+#include <riscv_vector.h>
+
+// CHECK-LABEL: @test_vror_vv_i8mf8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vror_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vror(vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vror_vx_i8mf8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vror_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vror(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_vror_vv_i8mf4(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vror_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vror(vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vror_vx_i8mf4(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vror_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vror(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_vror_vv_i8mf2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vror_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vror(vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vror_vx_i8mf2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] =
call @llvm.riscv.vror.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// 
CHECK-LABEL: @test_vror_vv_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// 
CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m1( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t 
test_vror_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vror.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t 
test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m8( +// CHECK-NEXT: 
entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vv_i8m1_m(vbool8_t mask, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vx_i8m1_m(vbool8_t mask, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vv_i8m2_m(vbool4_t mask, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vx_i8m2_m(vbool4_t mask, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vv_i8m4_m(vbool2_t mask, 
vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vx_i8m4_m(vbool2_t mask, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vv_i8m8_m(vbool1_t mask, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vx_i8m8_m(vbool1_t mask, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vv_i16m1_m(vbool16_t mask, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vx_i16m1_m(vbool16_t mask, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, 
vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vv_i16m2_m(vbool8_t mask, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vx_i16m2_m(vbool8_t mask, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vv_i16m4_m(vbool4_t mask, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vx_i16m4_m(vbool4_t mask, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vv_i16m8_m(vbool2_t mask, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vx_i16m8_m(vbool2_t mask, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vv_i32m1_m(vbool32_t mask, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m1_m( +// CHECK-NEXT: 
entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vx_i32m1_m(vbool32_t mask, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vv_i32m2_m(vbool16_t mask, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vx_i32m2_m(vbool16_t mask, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vv_i32m4_m(vbool8_t mask, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vx_i32m4_m(vbool8_t mask, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vv_i32m8_m(vbool4_t mask, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vx_i32m8_m(vbool4_t mask, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vv_i64m1_m(vbool64_t mask, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vx_i64m1_m(vbool64_t mask, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vv_i64m2_m(vbool32_t mask, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vx_i64m2_m(vbool32_t mask, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vv_i64m4_m(vbool16_t mask, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vx_i64m4_m(vbool16_t mask, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vv_i64m8_m(vbool8_t mask, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vx_i64m8_m(vbool8_t mask, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t mask, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t mask, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t mask, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t mask, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t mask, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t mask, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t 
test_vror_vv_u8m8_m(vbool1_t mask, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( poison, [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t mask, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t mask, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t mask, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t mask, 
vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t mask, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t mask, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t mask, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( poison, [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t mask, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t mask, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t mask, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t mask, vuint32m2_t vs2, vuint32m2_t vs1, 
size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t mask, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t mask, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t mask, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t mask, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t mask, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t mask, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t mask, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t mask, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t mask, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, 
vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t mask, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t mask, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( poison, [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t mask, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror(mask, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( poison, [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t mask, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror(mask, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ch.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ch.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ch.c @@ -0,0 +1,87 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsha2ch_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2cl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2cl.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2cl.c @@ -0,0 +1,87 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsha2cl_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsha2cl.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ms.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ms.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsha2ms.c @@ -0,0 +1,87 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsha2ms_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); 
+} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm3c.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm3c.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm3c.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 
-target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm3c_vi_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv1i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv2i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv4i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv8i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv16i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm3me.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm3me.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm3me.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm3me_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv1i32.nxv1i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv2i32.nxv2i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv4i32.nxv4i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +// 
CHECK-LABEL: @test_vsm3me_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv8i32.nxv8i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv16i32.nxv16i32.i64( poison, [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm4k.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm4k.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm4k.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm4k_vi_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv1i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv2i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv4i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv8i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv16i32.i32.i64( poison, [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm4r.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm4r.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsm4r.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm4r_vv_u32mf2( 
+// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm4r_vs_u32mf2(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm4r_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm4r_vs_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm4r_vs_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm4r_vs_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsll.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsll.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwsll.c @@ -0,0 +1,1085 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vwsll_vv_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vv_i16m4(vint8m2_t op1, 
vint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return 
__riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// 
CHECK-LABEL: @test_vwsll_vx_i64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// 
CHECK-LABEL: @test_vwsll_vv_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t 
vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t 
test_vwsll_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m1( +// CHECK-NEXT: 
entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32mf2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwsll.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m2( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsll.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m4( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m8( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx(op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t 
test_vwsll_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( poison, [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32mf2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( poison, [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m1_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m2_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m4_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( poison, [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv(mask, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m8_m( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( poison, [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx(mask, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesdf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesdf.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesdf.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// 
REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesdf_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32mf2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m1_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m4_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaesdf.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesdm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesdm.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesdm.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesdm_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32mf2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m1_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m4_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], 
i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesef.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesef.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesef.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesef_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32mf2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m1_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t 
test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m4_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesem.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesem.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesem_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32mf2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m1_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) 
{ + return __riscv_vaesem_vs_u32m1_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m4_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaeskf1.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaeskf1.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaeskf1.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaeskf1_vi_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32mf2_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + 
return __riscv_vaeskf1_vi_u32m1_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m2_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m4_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_vi_u32m8_tu(maskedoff, vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaeskf2.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaeskf2.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaeskf2.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaeskf2_vi_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv1i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv2i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m1_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv4i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m2_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv8i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m4_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv16i32.i32.i64( [[VD:%.*]], 
[[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_vi_u32m8_tu(vd, vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vaesz.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesz_vs_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vandn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vandn.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vandn.c @@ -0,0 +1,3174 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vandn_vv_i8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { 
+ return __riscv_vandn_vv_i8mf8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + 
return __riscv_vandn_vx_i8m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_i16mf4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16mf4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i16mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vv_i16m1_tu(vint16m1_t maskedoff, 
vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i32mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret 
[[TMP0]] +// +vint32mf2_t test_vandn_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], 
[[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vandn.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m4_tu( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tu(maskedoff, vs2, 
vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t 
maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 
[[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf8_tum( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: 
@test_vandn_vx_i8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_i16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i16mf2_tum(mask, maskedoff, vs2, 
vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t 
vl) { + return __riscv_vandn_vv_i16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t 
test_vandn_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// 
CHECK-LABEL: @test_vandn_vv_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, 
uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], 
i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vandn.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_i16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf4_tumu( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m4_tumu(mask, 
maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vv_i32m2_tumu(vbool16_t mask, vint32m2_t 
maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: 
ret [[TMP0]] +// +vint64m2_t test_vandn_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], 
[[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} 
+ +// CHECK-LABEL: @test_vandn_vv_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, 
vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i8mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf2_mu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i8m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_vx_i8m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_i16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: 
@test_vandn_vx_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_vx_i16m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_i32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m1_mu(mask, 
maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i32m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_vx_i32m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return 
__riscv_vandn_vv_i64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_i64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_vx_i64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, 
vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t mask, vuint8m2_t 
maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t 
test_vandn_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: 
@test_vandn_vv_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vbrev.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vbrev.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vbrev.c @@ -0,0 +1,798 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vbrev_v_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vbrev.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32mf2_tu( +// CHECK-NEXT: 
entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return 
__riscv_vbrev_v_u8mf8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret 
[[TMP0]] +// +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m1_tum( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t 
vl) { + return __riscv_vbrev_v_u8m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tumu(mask, maskedoff, vs2, vl); 
+} + +// CHECK-LABEL: @test_vbrev_v_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t 
maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vbrev8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vbrev8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vbrev8.c @@ -0,0 +1,798 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vbrev8_v_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tu(maskedoff, vs2, vl); +} + 
+// CHECK-LABEL: @test_vbrev8_v_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, 
size_t vl) { + return __riscv_vbrev8_v_u16m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: 
@test_vbrev8_v_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m2_tumu( +// CHECK-NEXT: 
entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t 
test_vbrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m8_tumu( 
+// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, 
vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i32.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m4_mu(mask, maskedoff, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vbrev8_v_u32m8_mu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m8_mu(mask, maskedoff, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vbrev8_v_u64m1_mu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m1_mu(mask, maskedoff, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vbrev8_v_u64m2_mu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m2_mu(mask, maskedoff, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vbrev8_v_u64m4_mu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m4_mu(mask, maskedoff, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vbrev8_v_u64m8_mu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m8_mu(mask, maskedoff, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmul.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmul.c
@@ -0,0 +1,294 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s
+
+#include <riscv_vector.h>
+
+// CHECK-LABEL: @test_vclmul_vv_u64m1_tu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_tu(maskedoff, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vclmul_vx_u64m1_tu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT:
ret [[TMP0]] +// +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); 
+} + +// CHECK-LABEL: @test_vclmul_vv_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t 
maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmulh.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclmulh.c @@ -0,0 +1,294 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S 
-passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vclmulh_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m1_tumu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t 
maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclz.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vclz.c
@@ -0,0 +1,798 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s
+
+#include <riscv_vector.h>
+
+// CHECK-LABEL: @test_vclz_v_u8mf8_tu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf8_tu(maskedoff, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vclz_v_u8mf4_tu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf4_tu(maskedoff, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vclz_v_u8mf2_tu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf2_tu(maskedoff, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vclz_v_u8m1_tu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m1_tu(maskedoff, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vclz_v_u8m2_tu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m2_tu(maskedoff, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vclz_v_u8m4_tu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m4_tu(maskedoff, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vclz_v_u8m8_tu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m8_tu(maskedoff, vs2, vl);
+}
+
+// CHECK-LABEL: @test_vclz_v_u16mf4_tu(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]])
+// CHECK-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t
test_vclz_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret 
[[TMP0]] +// +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return 
__riscv_vclz_v_u8m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vclz.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tumu(mask, maskedoff, 
vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t 
test_vclz_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_mu(mask, maskedoff, vs2, vl); 
+} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcpopv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcpopv.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcpopv.c @@ -0,0 +1,798 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vcpopv_v_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcpopv_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcpopv_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcpopv_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcpopv_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcpopv_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcpopv_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcpopv_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcpopv_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16mf4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf2_tu( +// CHECK-NEXT: entry: 
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcpopv_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcpopv_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcpopv_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcpopv_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcpopv_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcpopv_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcpopv_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcpopv_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcpopv_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcpopv_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return 
__riscv_vcpopv_v_u32m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcpopv_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcpopv_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcpopv_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcpopv_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcpopv_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcpopv_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcpopv_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcpopv_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcpopv_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m2_tum(mask, maskedoff, vs2, 
vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcpopv_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcpopv_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcpopv_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16mf4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcpopv_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcpopv_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcpopv_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcpopv_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcpopv_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: 
ret [[TMP0]] +// +vuint32mf2_t test_vcpopv_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcpopv_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcpopv_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcpopv_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcpopv_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcpopv_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcpopv_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcpopv_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcpopv_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: 
@test_vcpopv_v_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcpopv_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcpopv_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcpopv_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcpopv_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcpopv_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcpopv_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcpopv_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcpopv_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint16mf2_t test_vcpopv_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcpopv_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcpopv_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcpopv_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcpopv_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcpopv_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcpopv_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcpopv_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcpopv_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: 
@test_vcpopv_v_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcpopv_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcpopv_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcpopv_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcpopv_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcpopv_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcpopv_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcpopv_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcpopv_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t 
test_vcpopv_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcpopv_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcpopv_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcpopv_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u8m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcpopv_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16mf4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcpopv_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcpopv_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcpopv_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcpopv_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vcpopv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcpopv_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u16m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcpopv_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcpopv_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcpopv_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcpopv_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcpopv_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u32m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcpopv_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcpopv_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcpopv_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return 
__riscv_vcpopv_v_u64m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcpopv_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpopv_v_u64m8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vctz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vctz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vctz.c @@ -0,0 +1,798 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vctz_v_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: 
@test_vctz_v_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tu(maskedoff, vs2, 
vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i8.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: 
@test_vctz_v_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, 
vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m8_tumu( +// CHECK-NEXT: 
entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return 
__riscv_vctz_v_u8m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t 
test_vctz_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vghsh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vghsh.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vghsh.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vghsh_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vghsh_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vghsh_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vghsh_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vghsh_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m8_tu(vd, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vgmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vgmul.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vgmul.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vgmul_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv1i32.nxv2i32.i64( [[VD:%.*]], 
[[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32mf2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vgmul_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m1_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vgmul_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vgmul_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m4_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vgmul_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m8_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrev8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrev8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrev8.c @@ -0,0 +1,798 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vrev8_v_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t 
test_vrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return 
__riscv_vrev8_v_u16m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m4_tumu( +// CHECK-NEXT: 
entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t mask, 
vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrev8.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_mu(mask, maskedoff, vs2, vl); +} + 
+// CHECK-LABEL: @test_vrev8_v_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t 
mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrol.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrol.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrol.c @@ -0,0 +1,3174 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vrol_vv_i8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: 
@test_vrol_vx_i8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m8_tu( +// CHECK-NEXT: entry: 
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_i16mf4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16mf4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i16mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: 
@test_vrol_vx_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i32mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m1_tu(maskedoff, vs2, 
rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return 
__riscv_vrol_vv_i64m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, 
size_t vl) { + return __riscv_vrol_vx_u8mf4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) 
{ + return __riscv_vrol_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t 
test_vrol_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], 
[[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrol.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t 
maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t 
rs1, size_t vl) { + return __riscv_vrol_vx_i8m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_i16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i16mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vx_i16m1_tum(vbool16_t mask, vint16m1_t 
maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t 
test_vrol_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: 
ret [[TMP0]] +// +vint32m8_t test_vrol_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 
[[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrol.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: 
@test_vrol_vx_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return 
__riscv_vrol_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t 
vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, 
vint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_i16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t 
test_vrol_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m1_tumu( 
+// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: 
@test_vrol_vv_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t 
rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 
[[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrol.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i8mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i8m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_vx_i8m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_i16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], 
[[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_vx_i16m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_i32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrol.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i32m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_vx_i32m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_i64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_vx_i64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrol.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m2_mu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +} 
+ +// CHECK-LABEL: @test_vrol_vx_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return 
__riscv_vrol_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vror.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vror.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vror.c @@ -0,0 +1,3174 @@ +// NOTE: Assertions have been 
autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include <riscv_vector.h> + +// CHECK-LABEL: @test_vror_vv_i8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_i8m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 
[[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_i8m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_i8m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_i8m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_i16mf4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16mf4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i16mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 
[[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_i16m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_i16m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_i16m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_i16m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], 
[[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i32mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_i32m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_i32m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_i32m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_i32m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vror.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_i64m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_i64m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_i64m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_i64m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m2_tu( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: 
@test_vror_vv_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return 
__riscv_vror_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t 
maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// 
CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_i8m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: 
ret [[TMP0]] +// +vint8m2_t test_vror_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_i8m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_i8m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_i8m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_i16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] 
+// +vint16mf2_t test_vror_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i16mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_i16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_i16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_i16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_i16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_i32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_i32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], 
[[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_i32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_i32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_i64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_i64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_i64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_i64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf2_tum( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m8_tum( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tum(mask, maskedoff, vs2, rs1, vl); +} + 
+// CHECK-LABEL: @test_vror_vv_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + 
return __riscv_vror_vx_u32mf2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t mask, vuint32m8_t 
maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_i8m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_i8m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_i8m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_i8m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_i16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_i16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_i16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_i16m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], 
[[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_i16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_i32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_i32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vror.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_i32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_i32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_i64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_i64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m2_tumu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_i64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_i64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tumu(mask, maskedoff, vs2, vs1, vl); 
+} + +// CHECK-LABEL: @test_vror_vx_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tumu(mask, 
maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, 
vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret 
[[TMP0]] +// +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m8_tumu( +// CHECK-NEXT: 
entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i8mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m1_mu( +// CHECK-NEXT: 
entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_i8m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_i8m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_i8m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_i8m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_vx_i8m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_i16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_i16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_i16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m4_mu( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_i16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_i16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_vx_i16m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_i32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_i32m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m2_mu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_i32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_i32m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_i32m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_vx_i32m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_i64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m2_mu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_i64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_i64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_i64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_vx_i64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf4_mu( 
+// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m4_mu( +// CHECK-NEXT: 
entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: 
@test_vror_vv_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_mu(mask, maskedoff, 
vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + 
return __riscv_vror_vx_u32m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t 
vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsha2ch.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsha2ch.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsha2ch.c @@ -0,0 +1,87 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsha2ch_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return 
__riscv_vsha2ch_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ch_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ch.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8_tu(vd, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsha2cl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsha2cl.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsha2cl.c @@ -0,0 +1,87 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsha2cl_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsha2cl.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m8_tu(vd, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsha2ms.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsha2ms.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsha2ms.c @@ -0,0 +1,87 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsha2ms_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m8_tu(vd, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsm3c.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsm3c.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsm3c.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + 
+// CHECK-LABEL: @test_vsm3c_vi_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv1i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv2i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m1_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv4i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m2_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv8i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m4_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv16i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_vi_u32m8_tu(vd, vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsm3me.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsm3me.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsm3me.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm3me_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32mf2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8_tu(maskedoff, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsm4k.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsm4k.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsm4k.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm4k_vi_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32mf2_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m1_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m2_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m4_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_vi_u32m8_tu(maskedoff, vs2, 0, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsm4r.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsm4r.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsm4r.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm4r_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32mf2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m1_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m4_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsm4r.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m8_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwsll.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vwsll.c @@ -0,0 +1,2165 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vwsll_vv_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_i16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_i16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vx_i16m1_tu(vint16m1_t maskedoff, 
vint8mf2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_i32mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m1_tu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// 
+vint16mf4_t test_vwsll_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: 
@test_vwsll_vx_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, 
vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + 
return __riscv_vwsll_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: 
@test_vwsll_vx_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, 
vint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret 
[[TMP0]] +// +vint64m4_t test_vwsll_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t 
op2, size_t vl) { + return __riscv_vwsll_vv_u16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m8_tu(maskedoff, op1, op2, vl); +} + +// 
CHECK-LABEL: @test_vwsll_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret 
[[TMP0]] +// +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return 
__riscv_vwsll_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// 
CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf4_tumu( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t 
op2, size_t vl) { + return __riscv_vwsll_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-LABEL: @test_vwsll_vv_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t 
test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-LABEL: @test_vwsll_vx_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t 
maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesdf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesdf.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesdf.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesdf_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 
2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdf_vs_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdf.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesdm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesdm.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesdm.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S 
-passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesdm_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesdm_vs_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesdm.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + 
return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesef.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesef.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesef.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesef_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaesef.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesef_vs_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesef.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesem.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesem.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesem_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret 
[[TMP0]] +// +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesem_vs_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesem.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaeskf1.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaeskf1.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaeskf1.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaeskf1_vi_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf1_vi_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf1.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) 
+// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf1_tu(maskedoff, vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaeskf2.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaeskf2.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaeskf2.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaeskf2_vi_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv1i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv2i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv4i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv8i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vaeskf2_vi_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaeskf2.nxv16i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vaesz.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vaesz_vs_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + 
return __riscv_vaesz_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vaesz_vs_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaesz.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vandn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vandn.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vandn.c @@ -0,0 +1,3174 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vandn_vv_i8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return 
__riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: 
@test_vandn_vx_i8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: 
@test_vandn_vv_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: 
@test_vandn_vx_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m4_tu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf2_tu( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vandn.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m4_tu( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m4_tu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: 
@test_vandn_vx_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 
[[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret 
[[TMP0]] +// +vint8m8_t test_vandn_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret 
[[TMP0]] +// +vint16m2_t test_vandn_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret 
[[TMP0]] +// +vint32m1_t test_vandn_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// 
+vint64m1_t test_vandn_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t 
test_vandn_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t 
test_vandn_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t 
test_vandn_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret 
[[TMP0]] +// +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: 
ret [[TMP0]] +// +vint8m8_t test_vandn_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 
[[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], 
[[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vandn.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m8_tumu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); 
+} + +// CHECK-LABEL: @test_vandn_vx_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return 
__riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vandn_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vandn_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, 
maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vandn_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vandn_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vandn_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vandn_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i8m8_mu( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vandn_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vandn_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vandn_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vandn_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vandn_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vandn_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vandn_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vandn_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vandn.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vandn_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vandn_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vandn_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vandn_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vandn_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vandn_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vandn_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vandn_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 
[[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret 
[[TMP0]] +// +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret 
[[TMP0]] +// +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: 
ret [[TMP0]] +// +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret 
[[TMP0]] +// +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vandn_vv_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vandn_vx_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vandn.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vbrev.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vbrev.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vbrev.c @@ -0,0 +1,798 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include <riscv_vector.h> + +// CHECK-LABEL: @test_vbrev_v_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv32i8.i64( [[MASKEDOFF:%.*]],
[[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 
[[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m1_tum( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, 
vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t mask, vuint64m4_t 
maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] 
+// +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vbrev.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: 
@test_vbrev_v_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, 
vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t 
vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev_v_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_mu(mask, maskedoff, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vbrev8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vbrev8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vbrev8.c @@ -0,0 +1,798 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vbrev8_v_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: 
@test_vbrev8_v_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); 
+} + +// CHECK-LABEL: @test_vbrev8_v_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i8.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: 
@test_vbrev8_v_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t 
maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m8_tumu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + 
return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t 
test_vbrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vbrev8_v_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vbrev8.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], 
[[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vclmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vclmul.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vclmul.c @@ -0,0 +1,294 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vclmul_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, 
size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t 
vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t 
mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vv_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t 
test_vclmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmul_vx_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vclmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vclmulh.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vclmulh.c @@ -0,0 +1,294 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vclmulh_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, 
size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return 
__riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t 
test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vv_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vclmulh_vx_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vclz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vclz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vclz.c @@ -0,0 +1,798 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vclz_v_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vclz.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m8_tum( +// CHECK-NEXT: 
entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: 
@test_vclz_v_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, 
maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t mask, vuint32m4_t 
maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t 
test_vclz_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t 
test_vclz_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vclz_v_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vclz.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_mu(mask, maskedoff, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcpopv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcpopv.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcpopv.c @@ -0,0 +1,798 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vcpopv_v_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcpopv_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcpopv_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcpopv_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcpopv_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcpopv_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcpopv_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcpopv_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcpopv_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, 
vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcpopv_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcpopv_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcpopv_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcpopv_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcpopv_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcpopv_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcpopv_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcpopv_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcpopv_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcpopv_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return 
__riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcpopv_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcpopv_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcpopv_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcpopv_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpopv_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcpopv_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcpopv_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcpopv_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcpopv_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcpopv_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m4_tum( +// CHECK-NEXT: entry: 
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcpopv_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcpopv_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcpopv_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcpopv_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcpopv_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcpopv_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcpopv_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcpopv_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcpopv_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return 
__riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcpopv_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcpopv_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcpopv_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcpopv_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcpopv_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcpopv_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcpopv_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcpopv_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpopv_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint8mf8_t test_vcpopv_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcpopv_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcpopv_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcpopv_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vcpopv_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcpopv_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcpopv_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcpopv_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcpopv_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vcpopv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcpopv_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcpopv_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcpopv_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcpopv_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcpopv_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcpopv_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcpopv_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcpopv_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcpopv_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return 
__riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcpopv_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcpopv_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcpopv_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcpopv_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpopv_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vcpopv_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vcpopv_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vcpopv_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vcpopv_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t 
test_vcpopv_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vcpopv_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vcpopv_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vcpopv_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vcpopv_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vcpopv_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vcpopv_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vcpopv_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vcpopv_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vcpopv_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vcpopv_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vcpopv_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vcpopv_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vcpopv_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vcpopv_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vcpopv_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vcpopv_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vcpopv_v_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcpopv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vcpopv_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpopv_mu(mask, maskedoff, vs2, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vctz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vctz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vctz.c @@ -0,0 +1,798 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include <riscv_vector.h> + +// CHECK-LABEL: @test_vctz_v_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t 
test_vctz_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, 
size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m8_tum( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: 
@test_vctz_v_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, 
maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, 
size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t 
test_vctz_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t 
test_vctz_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret 
[[TMP0]] +// +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vctz_v_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vctz.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_mu(mask, maskedoff, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vghsh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vghsh.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vghsh.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include <riscv_vector.h> + +// CHECK-LABEL: @test_vghsh_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: 
@test_vghsh_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vghsh_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vghsh_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vghsh_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vghsh.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vgmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vgmul.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vgmul.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include <riscv_vector.h> + +// CHECK-LABEL: @test_vgmul_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vgmul_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vgmul_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vgmul_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vgmul_vv_u32m8_tu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vgmul.vv.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrev8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrev8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrev8.c @@ -0,0 +1,798 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vrev8_v_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t 
test_vrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t 
test_vrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: 
@test_vrev8_v_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + 
return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tum(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t 
test_vrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m1_tumu( +// CHECK-NEXT: entry: 
+// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// 
CHECK-LABEL: @test_vrev8_v_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) { + return 
__riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + +// CHECK-LABEL: @test_vrev8_v_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrev8.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_mu(mask, maskedoff, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrol.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrol.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrol.c @@ -0,0 
+1,3174 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vrol_vv_i8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: 
ret [[TMP0]] +// +vint8m2_t test_vrol_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t 
vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return 
__riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// 
CHECK-LABEL: @test_vrol_vv_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf8_tu( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.nxv32i8.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 
[[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], 
i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// 
CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// 
CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t 
test_vrol_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vv_i32mf2_tum(vbool64_t mask, 
vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, 
vint32m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return 
__riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, 
vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf4_tum( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m4_tum( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m2_tum( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m2_tum( +// CHECK-NEXT: 
entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], 
[[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], 
i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrol_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrol_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrol_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrol_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrol_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrol_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, 
int8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrol_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrol_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrol_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrol_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, 
maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrol_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrol_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrol_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrol_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: 
@test_vrol_vv_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrol_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrol_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrol_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrol_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrol_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrol_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrol_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrol_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t 
test_vrol_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t 
test_vrol_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t 
test_vrol_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t mask, 
vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vrol_vv_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vrol_vx_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrol.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrol_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vror.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vror.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vror.c @@ -0,0 +1,3174 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include <riscv_vector.h> + +// CHECK-LABEL: @test_vror_vv_i8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, 
vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vror.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.nxv16i16.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 
[[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t 
test_vror_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, 
uint8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + 
+// CHECK-LABEL: @test_vror_vx_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// 
CHECK-LABEL: @test_vror_vv_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: 
@test_vror_vx_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m8_tu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_tu(maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vv_i16mf4_tum(vbool64_t mask, 
vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, 
vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, 
size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, 
maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// 
CHECK-LABEL: @test_vror_vx_u8mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m4_tum( +// CHECK-NEXT: entry: +// 
CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vror.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_tum(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( 
[[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: 
ret [[TMP0]] +// +vint64m2_t test_vror_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t 
test_vror_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: 
ret [[TMP0]] +// +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
+// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_tumu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vror_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vror_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vror_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m1_t test_vror_vx_i8m1_mu(vbool8_t mask, 
vint8m1_t maskedoff, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m2_t test_vror_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m4_t test_vror_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint8m8_t test_vror_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vror_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, 
rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vror_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vror_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vror_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vror_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i16m8_mu( +// 
CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vror_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vror_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vror_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vror_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vror_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vror_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vror_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vror_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], 
[[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vror_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vror_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t mask, vuint8m8_t 
maskedoff, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u8m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i8 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, 
vuint16m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i16 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t 
vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_mu(mask, 
maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + +// CHECK-LABEL: @test_vror_vv_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vror_vx_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vror.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i64 [[RS1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vror_mu(mask, maskedoff, vs2, rs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsha2ch.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsha2ch.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsha2ch.c @@ -0,0 +1,87 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: 
%clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s
+
+#include <riscv_vector.h>
+
+// CHECK-LABEL: @test_vsha2ch_vv_u32mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsha2ch.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2ch_vv_u32m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsha2ch.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2ch_vv_u32m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsha2ch.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2ch_vv_u32m4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsha2ch.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2ch_vv_u32m8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ch.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2ch_vv_u64m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsha2ch.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2ch_vv_u64m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsha2ch.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2ch_vv_u64m4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsha2ch.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2ch_vv_u64m8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ch.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t 
test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2ch_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsha2cl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsha2cl.c
new file mode
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsha2cl.c
@@ -0,0 +1,87 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s
+
+#include <riscv_vector.h>
+
+// CHECK-LABEL: @test_vsha2cl_vv_u32mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsha2cl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2cl_vv_u32m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsha2cl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2cl_vv_u32m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsha2cl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2cl_vv_u32m4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsha2cl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2cl_vv_u32m8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2cl_vv_u64m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsha2cl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2cl_vv_u64m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsha2cl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2cl_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-LABEL: @test_vsha2cl_vv_u64m4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2cl_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2cl.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsha2ms.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsha2ms.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsha2ms.c @@ -0,0 +1,87 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsha2ms_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv1i32.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv4i32.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv8i32.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv16i32.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv1i64.nxv1i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, 
vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv2i64.nxv2i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv4i64.nxv4i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsha2ms_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsha2ms.nxv8i64.nxv8i64.i64( [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsm3c.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsm3c.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsm3c.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm3c_vi_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv1i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv2i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv4i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv8i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm3c_vi_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3c.nxv16i32.i32.i64( [[VD:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t 
test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsm3me.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsm3me.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsm3me.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm3me_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + +// CHECK-LABEL: @test_vsm3me_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm3me.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_tu(maskedoff, vs2, vs1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsm4k.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsm4k.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsm4k.c @@ -0,0 +1,51 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm4k_vi_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vsm4k_vi_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + +// CHECK-LABEL: @test_vsm4k_vi_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4k.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[VS2:%.*]], i32 0, i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t uimm, size_t vl) { + return __riscv_vsm4k_tu(maskedoff, vs2, 0, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsm4r.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsm4r.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsm4r.c @@ -0,0 +1,96 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vsm4r_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv1i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv1i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsm4r_vs_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv2i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vsm4r_vs_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv4i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv4i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsm4r_vs_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv8i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv8i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsm4r_vs_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vv.nxv16i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +// CHECK-LABEL: @test_vsm4r_vs_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsm4r.vs.nxv16i32.nxv2i32.i64( [[VD:%.*]], [[VS2:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsm4r_vs_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwsll.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vwsll.c @@ -0,0 +1,2165 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s + +#include + +// CHECK-LABEL: @test_vwsll_vv_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return 
__riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vv_i16m8_tu(vint16m8_t maskedoff, 
vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: 
@test_vwsll_vv_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + 
return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t 
test_vwsll_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, 
op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t 
maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsll_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsll_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsll_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsll_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t 
test_vwsll_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsll_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsll_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsll_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsll_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsll_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsll_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsll_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsll_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsll_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsll_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_i64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsll_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
+// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsll.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32mf2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, 
op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m1_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m2_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m4_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t 
test_vwsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m8_tu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret 
[[TMP0]] +// +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32mf2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m1_tum( 
+// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m1_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m2_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m4_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m8_tum( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + 
return __riscv_vwsll_vx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// 
+vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32mf2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m1_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m2_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m4_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m8_tumu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t 
test_vwsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u16m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32mf2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u32m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m1_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-LABEL: @test_vwsll_vv_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m2_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m4_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vv_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vwsll_vv_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-LABEL: @test_vwsll_vx_u64m8_mu( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vwsll_vx_mu(mask, maskedoff, op1, op2, vl); +}