diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -200,24 +200,28 @@
 // Basic classes with automatic codegen.
 //===----------------------------------------------------------------------===//
 
-class RVVBinBuiltin<string suffix, string prototype, string type_range>
-    : RVVBuiltin<suffix, prototype, type_range> {
-  let IntrinsicTypes = [-1, 1];
-}
-
-multiclass RVVBinBuiltinSet<string intrinsic_name, string type_range,
-                            list<list<string>> suffixes_prototypes> {
-  let IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask" in {
+multiclass RVVBuiltinSet<string intrinsic_name, string type_range,
+                         list<list<string>> suffixes_prototypes,
+                         list<int> intrinsic_types> {
+  let IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask",
+      IntrinsicTypes = intrinsic_types in {
     foreach s_p = suffixes_prototypes in {
       let Name = NAME # "_" # s_p[0] in {
        defvar suffix = s_p[1];
        defvar prototype = s_p[2];
-        def : RVVBinBuiltin<suffix, prototype, type_range>;
+        def : RVVBuiltin<suffix, prototype, type_range>;
      }
    }
  }
 }
 
+// IntrinsicTypes is output, op1 [-1, 1]
+multiclass RVVOutOp1BuiltinSet<string intrinsic_name, string type_range,
+                               list<list<string>> suffixes_prototypes> {
+  defm NAME
+      : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 1]>;
+}
+
 defvar TypeList = ["c","s","i","l","f","d"];
 defvar EEWList = [["8", "(Log2EEW:3)"],
                   ["16", "(Log2EEW:4)"],
@@ -387,14 +391,147 @@
 // 12. Vector Integer Arithmetic Instructions
 // 12.1. Vector Single-Width Integer Add and Subtract
-defm vadd : RVVBinBuiltinSet<"vadd", "csil",
-                             [["vv", "v", "vvv"],
-                              ["vx", "v", "vve"],
-                              ["vv", "Uv", "UvUvUv"],
-                              ["vx", "Uv", "UvUvUe"]]>;
+defm vadd : RVVOutOp1BuiltinSet<"vadd", "csil",
+                                [["vv", "v", "vvv"],
+                                 ["vx", "v", "vve"],
+                                 ["vv", "Uv", "UvUvUv"],
+                                 ["vx", "Uv", "UvUvUe"]]>;
+defm vsub : RVVOutOp1BuiltinSet<"vsub", "csil",
+                                [["vv", "v", "vvv"],
+                                 ["vx", "v", "vve"],
+                                 ["vv", "Uv", "UvUvUv"],
+                                 ["vx", "Uv", "UvUvUe"]]>;
+defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil",
+                                 [["vx", "v", "vve"],
+                                  ["vx", "Uv", "UvUvUe"]]>;
+
+// 12.2. Vector Widening Integer Add/Subtract
+// TODO
+
+// 12.3. Vector Integer Extension
+// TODO
+
+// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
+// TODO
+
+// 12.5. Vector Bitwise Logical Instructions
+defm vand : RVVOutOp1BuiltinSet<"vand", "csil",
+                                [["vv", "v", "vvv"],
+                                 ["vx", "v", "vve"],
+                                 ["vv", "Uv", "UvUvUv"],
+                                 ["vx", "Uv", "UvUvUe"]]>;
+defm vxor : RVVOutOp1BuiltinSet<"vxor", "csil",
+                                [["vv", "v", "vvv"],
+                                 ["vx", "v", "vve"],
+                                 ["vv", "Uv", "UvUvUv"],
+                                 ["vx", "Uv", "UvUvUe"]]>;
+defm vor : RVVOutOp1BuiltinSet<"vor", "csil",
+                               [["vv", "v", "vvv"],
+                                ["vx", "v", "vve"],
+                                ["vv", "Uv", "UvUvUv"],
+                                ["vx", "Uv", "UvUvUe"]]>;
+
+// 12.6. Vector Single-Width Bit Shift Instructions
+defm vsll : RVVOutOp1BuiltinSet<"vsll", "csil",
+                                [["vv", "v", "vvUv"],
+                                 ["vx", "v", "vvz"],
+                                 ["vv", "Uv", "UvUvUv"],
+                                 ["vx", "Uv", "UvUvz"]]>;
+defm vsrl : RVVOutOp1BuiltinSet<"vsrl", "csil",
+                                [["vv", "Uv", "UvUvUv"],
+                                 ["vx", "Uv", "UvUvz"]]>;
+defm vsra : RVVOutOp1BuiltinSet<"vsra", "csil",
+                                [["vv", "v", "vvUv"],
+                                 ["vx", "v", "vvz"]]>;
+
+// 12.7. Vector Narrowing Integer Right Shift Instructions
+// TODO
+
+// 12.8. Vector Integer Comparison Instructions
+// TODO
+
+// 12.9. Vector Integer Min/Max Instructions
+defm vminu : RVVOutOp1BuiltinSet<"vminu", "csil",
+                                 [["vv", "Uv", "UvUvUv"],
+                                  ["vx", "Uv", "UvUvUe"]]>;
+defm vmin : RVVOutOp1BuiltinSet<"vmin", "csil",
+                                [["vv", "v", "vvv"],
+                                 ["vx", "v", "vve"]]>;
+defm vmaxu : RVVOutOp1BuiltinSet<"vmaxu", "csil",
+                                 [["vv", "Uv", "UvUvUv"],
+                                  ["vx", "Uv", "UvUvUe"]]>;
+defm vmax : RVVOutOp1BuiltinSet<"vmax", "csil",
+                                [["vv", "v", "vvv"],
+                                 ["vx", "v", "vve"]]>;
+
+// 12.10. Vector Single-Width Integer Multiply Instructions
+defm vmul : RVVOutOp1BuiltinSet<"vmul", "csil",
+                                [["vv", "v", "vvv"],
+                                 ["vx", "v", "vve"],
+                                 ["vv", "Uv", "UvUvUv"],
+                                 ["vx", "Uv", "UvUvUe"]]>;
+defm vmulh : RVVOutOp1BuiltinSet<"vmulh", "csil",
+                                 [["vv", "v", "vvv"],
+                                  ["vx", "v", "vve"]]>;
+defm vmulhu : RVVOutOp1BuiltinSet<"vmulhu", "csil",
+                                  [["vv", "Uv", "UvUvUv"],
+                                   ["vx", "Uv", "UvUvUe"]]>;
+defm vmulhsu : RVVOutOp1BuiltinSet<"vmulhsu", "csil",
+                                   [["vv", "v", "vvUv"],
+                                    ["vx", "v", "vvUe"]]>;
+
+// 12.11. Vector Integer Divide Instructions
+defm vdivu : RVVOutOp1BuiltinSet<"vdivu", "csil",
+                                 [["vv", "Uv", "UvUvUv"],
+                                  ["vx", "Uv", "UvUvUe"]]>;
+defm vdiv : RVVOutOp1BuiltinSet<"vdiv", "csil",
+                                [["vv", "v", "vvv"],
+                                 ["vx", "v", "vve"]]>;
+defm vremu : RVVOutOp1BuiltinSet<"vremu", "csil",
+                                 [["vv", "Uv", "UvUvUv"],
+                                  ["vx", "Uv", "UvUvUe"]]>;
+defm vrem : RVVOutOp1BuiltinSet<"vrem", "csil",
+                                [["vv", "v", "vvv"],
+                                 ["vx", "v", "vve"]]>;
+
+// 12.12. Vector Widening Integer Multiply Instructions
+// TODO
+
+// 12.13. Vector Single-Width Integer Multiply-Add Instructions
+let HasMaskedOffOperand = false in {
+defm vmacc : RVVOutOp1BuiltinSet<"vmacc", "csil",
+                                 [["vv", "v", "vvvv"],
+                                  ["vx", "v", "vvev"],
+                                  ["vv", "Uv", "UvUvUvUv"],
+                                  ["vx", "Uv", "UvUvUeUv"]]>;
+defm vnmsac : RVVOutOp1BuiltinSet<"vnmsac", "csil",
+                                  [["vv", "v", "vvvv"],
+                                   ["vx", "v", "vvev"],
+                                   ["vv", "Uv", "UvUvUvUv"],
+                                   ["vx", "Uv", "UvUvUeUv"]]>;
+defm vmadd : RVVOutOp1BuiltinSet<"vmadd", "csil",
+                                 [["vv", "v", "vvvv"],
+                                  ["vx", "v", "vvev"],
+                                  ["vv", "Uv", "UvUvUvUv"],
+                                  ["vx", "Uv", "UvUvUeUv"]]>;
+defm vnmsub : RVVOutOp1BuiltinSet<"vnmsub", "csil",
+                                  [["vv", "v", "vvvv"],
+                                   ["vx", "v", "vvev"],
+                                   ["vv", "Uv", "UvUvUvUv"],
+                                   ["vx", "Uv", "UvUvUeUv"]]>;
+}
+
+// 12.14. Vector Widening Integer Multiply-Add Instructions
+// TODO
+
+// 12.15. Vector Integer Merge Instructions
+// TODO
+
+// 12.16. Vector Integer Move Instructions
+// TODO
+
 // 14. Vector Floating-Point Instructions
 // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm vfadd : RVVBinBuiltinSet<"vfadd", "fd",
-                              [["vv", "v", "vvv"],
-                               ["vf", "v", "vve"]]>;
+defm vfadd : RVVOutOp1BuiltinSet<"vfadd", "fd",
+                                 [["vv", "v", "vvv"],
+                                  ["vf", "v", "vve"]]>;
 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf2(
+//
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vand_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16mf4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vand_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vand_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vand_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vand_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vand(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vand_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t 
vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t 
test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vand_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, 
vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vand_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vand(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c new file mode 100644 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) 
{ + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vdiv(op1, op2, vl); 
+} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vdiv_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vdiv_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vdiv_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m4( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vdiv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m2( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vdivu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, 
size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t 
test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vdivu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vdiv.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vdiv_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) 
{ + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t 
test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vdiv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return 
vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vdivu(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c new file mode 
100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmacc_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmacc_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmacc_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmacc_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); 
+} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmacc_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmacc_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmacc_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmacc_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmacc_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmacc_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmacc_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m2( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmacc_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmacc_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmacc_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmacc_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmacc_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmacc_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmacc_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmacc_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmacc_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m4( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmacc_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmacc_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmacc_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmacc_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmacc_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmacc_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmacc_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmacc_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmacc_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmacc_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmacc_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m8( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmacc_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i32( [[ACC:%.*]], 
i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { + return vmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmacc_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmacc_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmacc_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmacc_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmacc_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmacc_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmacc_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmacc_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { + return vmacc(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[ACC:%.*]], 
i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmadd_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmadd_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmadd_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmadd_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmadd_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i32( [[ACC:%.*]], i8 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmadd_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmadd_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmadd_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t 
test_vmadd_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmadd_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmadd_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmadd_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmadd_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmadd_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmadd_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmadd_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmadd_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t 
test_vmadd_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmadd_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmadd_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmadd_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmadd_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmadd_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmadd_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmadd_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmadd_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmadd_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmadd_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmadd_vv_i64m2(vint64m2_t acc, 
vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmadd_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmadd_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmadd_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmadd_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmadd_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return 
vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmadd_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vmadd(acc, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return 
vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { + return vmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmadd_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( 
[[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( 
[[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m( +// CHECK-RV64-NEXT: 
entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+  return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+  return vmadd(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: 
@test_vmax_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( 
[[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( [[OP1:%.*]], 
i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], 
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vmax(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], 
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmaxu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmaxu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmax_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmax_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + 
return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vmax(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t 
test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmaxu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmaxu(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vmin_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i32( 
[[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmin_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmin_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmin_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmin_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmin_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m8( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vmin(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m4( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vminu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i32( [[OP1:%.*]], i32 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vminu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vminu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32mf2_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, 
vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vmin(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vminu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m4_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, 
vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vminu(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c @@ -0,0 +1,6168 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vmul_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i32( 
[[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vmul_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmul_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m4( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m8( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmulh_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: 
ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vmulh(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i32( [[OP1:%.*]], i8 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + 
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t 
test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, 
size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhsu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
+  return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
+  return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
+  return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i32(
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmul_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, 
size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t 
test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmul_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmul_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t 
op1, vuint16m2_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmulh_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmulh(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, 
vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vmulh_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vmulh(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t 
maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, 
size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmulhsu_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, 
vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmulhsu_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhsu(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vnmsac(acc, op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vnmsac_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t 
op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, 
vuint8mf4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { 
+ return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { + return vnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32( [[ACC:%.*]], 
i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vnmsac_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t 
op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t 
acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t 
test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { + return vnmsac(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]],
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vnmsub_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vnmsub_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vnmsub_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vnmsub_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i32( [[ACC:%.*]], i32 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { + return vnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8_m( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vnmsub_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vnmsub(mask, 
acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + 
return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t 
op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, 
vint64m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, 
vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t 
op1, vuint8m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, 
vuint8m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t 
test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { + return vnmsub(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vor_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, 
size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vor(op1, op2, vl); +} 
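+
+// Illustrative note (not autogenerated; a sketch of what the surrounding checks
+// verify): the overloaded vor() resolves on its operand types, so the same
+// spelling lowers to a different llvm.riscv.vor.* intrinsic per call. The
+// suffixed names below are the conventional non-overloaded spellings and are
+// given only as an assumed reference point:
+//
+//   vint16m2_t r_vv = vor(op1, op2, vl); // vector op2  -> vor_vv_i16m2,
+//                                        //   i.e. @llvm.riscv.vor.nxv8i16.nxv8i16
+//   vint16m2_t r_vx = vor(op1, x, vl);   // int16_t x   -> vor_vx_i16m2,
+//                                        //   i.e. @llvm.riscv.vor.nxv8i16.i16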
+ +// CHECK-RV32-LABEL: @test_vor_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vor_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], 
i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vor_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( 
[[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vor_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, 
vuint16m8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vor(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// 
REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vrem_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8mf2( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vrem(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m2( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vremu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + 
return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t 
op1, vuint64m2_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vremu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m2_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t 
op1, int64_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vrem(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t 
test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vremu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vremu(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c new file mode 100644 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c @@ -0,0 +1,1240 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t 
test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], 
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vrsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return 
vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t 
test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vrsub_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vrsub(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vsll_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], 
[[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m2( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsll_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i32( [[OP1:%.*]], 
[[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m1( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// 
CHECK-RV32-LABEL: @test_vsll_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) 
{ + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { + return vsll(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsll(mask, 
maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t 
test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsll_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { + return 
vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t 
test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsll_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// 
CHECK-RV32-LABEL: @test_vsll_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t 
mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { + return vsll(mask, maskedoff, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c @@ -0,0 +1,1240 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: 
riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vsra_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// 
CHECK-RV32-LABEL: @test_vsra_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// 
CHECK-RV32-LABEL: @test_vsra_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return 
vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, 
vuint16m4_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { + return vsra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: 
@test_vsra_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { + 
return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsra_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m1_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, 
size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { + return vsra(mask, maskedoff, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c @@ -0,0 +1,1240 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { + return vsrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// 
CHECK-RV32-LABEL: @test_vsrl_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t 
test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsrl(mask, maskedoff, 
op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t 
test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
+  return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+  return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
+  return vsrl(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
[[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t 
test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return 
vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return 
vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vsub(op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vsub(op1, op2, vl); +} 
+ +// CHECK-RV32-LABEL: @test_vsub_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vsub_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vsub_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m1( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vsub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vsub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsub_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vsub_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t 
op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t 
test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vsub_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t 
maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsub_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vsub(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vxor_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t 
test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, 
size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, 
vuint16m1_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t 
op1, vuint16m8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t 
test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t 
test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t 
test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vxor(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + 
return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t 
op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8mf2_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + 
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return 
vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vxor_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vxor(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vand_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vand_vv_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vand_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vand_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vand_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vand_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vand_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vand_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vand_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vand_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vand_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vand_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vand_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vand_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vand_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vand_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vand_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vand_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { 
+ return vand_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vand_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vand_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vand_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vand_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vand_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vand_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vand_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vand_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vand_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vand_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vand_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vand_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8mf4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vand_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vand_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vand_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vand_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vand_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vand_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, 
vuint16mf4_t op2, size_t vl) { + return vand_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vand_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vand_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return 
vand_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vand_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vand_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_vv_u32mf2(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vand_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vand_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vand_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vand_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m4( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vand_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vand_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vand_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vand_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vand_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vand_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8mf8_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vand_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vand_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vand_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vand_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vand_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m1_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vand_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vand_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vand_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vand_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vand_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vand_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vand_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16mf2_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vand_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vand_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vand_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vand_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vand_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vand_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vand_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vand_vx_i16m8_m(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vand_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vand_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + 
return vand_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vand_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vand_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vand_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, 
int32_t op2, size_t vl) { + return vand_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vand_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vand_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t 
maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vand_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vand_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vand_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vand_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t 
test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vand_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vand_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vand_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vand_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vand_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vand_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vand_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vand_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vand_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vand_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vand_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vand_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vand_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vand_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vand_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vand_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vand_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vand_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vand_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vand_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vand_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vand_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vand_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vand_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vand_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vand_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vand_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vand_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vand_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vand_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vdiv_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8
[[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vdiv_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vdiv_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vdiv_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vdiv_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vdiv_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vdiv_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vdiv_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vdiv_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vdiv_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t 
test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vdiv_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vdiv_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vdiv_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return 
vdiv_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vdiv_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vdiv_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m4( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vdiv_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vdiv_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vdiv_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdiv.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vdiv_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vdiv_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vdiv_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vdivu_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vdivu_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vdivu_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vdivu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vdivu_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vdivu_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vdivu_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vdivu_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vdivu_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vdivu_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vdivu_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vdivu_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vdivu_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vdivu_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vdivu_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vdivu_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vdivu_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vdivu_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vdivu_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vdivu_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vdivu_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vdivu_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vdiv_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vdiv_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, 
vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vdiv_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vdiv_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t 
maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vdiv_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vdiv_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vdiv_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vdiv_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, 
int8_t op2, size_t vl) { + return vdiv_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vdiv_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vdiv_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t 
test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vdiv_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vdiv_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vdiv_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vdiv_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vdiv_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vdiv_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vdiv_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vdiv_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vdiv_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vdiv_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vdiv_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vdiv_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vdiv_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vdiv_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vdiv_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdiv_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vdiv_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vdivu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vdivu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vdivu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vdivu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vdivu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vdivu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vdivu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vdivu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vdivu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vdivu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vdivu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vdivu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vdivu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vdivu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vdivu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vdivu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vdivu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vdivu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, 
size_t vl) { + return vdivu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vdivu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vdivu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t 
test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vdivu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vdivu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vdivu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vdivu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vdivu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vdivu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vdivu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vdivu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmacc_vv_i8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vmacc_vx_i8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmacc_vv_i8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vmacc_vx_i8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmacc_vv_i8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vmacc_vx_i8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmacc_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmacc_vv_i8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmacc_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vmacc_vx_i8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmacc_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmacc_vv_i8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmacc_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmacc_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vmacc_vx_i8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmacc_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmacc_vv_i8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmacc_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vmacc_vx_i8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmacc_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmacc_vv_i8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmacc_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vmacc_vx_i8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmacc_vv_i16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vmacc_vx_i16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmacc_vv_i16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vmacc_vx_i16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmacc_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmacc_vv_i16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmacc_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vmacc_vx_i16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmacc_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmacc_vv_i16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmacc_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vmacc_vx_i16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmacc_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmacc_vv_i16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmacc_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vmacc_vx_i16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmacc_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmacc_vv_i16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmacc_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vmacc_vx_i16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmacc_vv_i32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vmacc_vx_i32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmacc_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmacc_vv_i32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmacc_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vmacc_vx_i32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmacc_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmacc_vv_i32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmacc_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return 
vmacc_vx_i32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmacc_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmacc_vv_i32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmacc_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vmacc_vx_i32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmacc_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmacc_vv_i32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmacc_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vmacc_vx_i32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmacc_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmacc_vv_i64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmacc_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vmacc_vx_i64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmacc_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmacc_vv_i64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmacc_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vmacc_vx_i64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmacc_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmacc_vv_i64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmacc_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vmacc_vx_i64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmacc_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmacc_vv_i64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmacc_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vmacc_vx_i64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmacc_vv_u8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vmacc_vx_u8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmacc_vv_u8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vmacc_vx_u8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t 
test_vmacc_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmacc_vv_u8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vmacc_vx_u8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmacc_vv_u8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vmacc_vx_u8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmacc_vv_u8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vmacc_vx_u8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmacc_vv_u8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vmacc_vx_u8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmacc_vv_u8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vmacc_vx_u8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmacc_vv_u16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vmacc_vx_u16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmacc_vv_u16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vmacc_vx_u16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmacc_vv_u16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vmacc_vx_u16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmacc_vv_u16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[ACC:%.*]], i16 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vmacc_vx_u16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmacc_vv_u16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vmacc_vx_u16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmacc_vv_u16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vmacc_vx_u16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmacc_vv_u32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vmacc_vx_u32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmacc_vv_u32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vmacc_vx_u32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmacc_vv_u32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vmacc_vx_u32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmacc_vv_u32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vmacc_vx_u32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmacc_vv_u32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vmacc_vx_u32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmacc_vv_u64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vmacc_vx_u64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmacc_vv_u64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vmacc_vx_u64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmacc_vv_u64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { + return vmacc_vx_u64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmacc_vv_u64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { + return vmacc_vx_u64m8(acc, 
op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmacc_vv_i8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vmacc_vx_i8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmacc_vv_i8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vmacc_vx_i8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmacc_vv_i8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vmacc_vx_i8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmacc_vv_i8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vmacc_vx_i8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmacc_vv_i8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vmacc_vx_i8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmacc_vv_i8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vmacc_vx_i8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmacc_vv_i8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vmacc_vx_i8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmacc_vv_i16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vmacc_vx_i16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmacc_vv_i16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vmacc_vx_i16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmacc_vv_i16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vmacc_vx_i16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmacc_vv_i16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vmacc_vx_i16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmacc_vv_i16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vmacc_vx_i16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmacc_vv_i16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vmacc_vx_i16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmacc_vv_i32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vmacc_vx_i32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmacc_vv_i32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vmacc_vx_i32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmacc_vv_i32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return vmacc_vx_i32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmacc_vv_i32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vmacc_vx_i32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmacc_vv_i32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vmacc_vx_i32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmacc_vv_i64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vmacc_vx_i64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmacc_vv_i64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vmacc_vx_i64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmacc_vv_i64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vmacc_vx_i64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmacc_vv_i64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vmacc_vx_i64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmacc_vv_u8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t 
test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vmacc_vx_u8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmacc_vv_u8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vmacc_vx_u8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmacc_vv_u8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vmacc_vx_u8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, 
vuint8m1_t op2, size_t vl) { + return vmacc_vv_u8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vmacc_vx_u8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmacc_vv_u8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vmacc_vx_u8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmacc_vv_u8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vmacc_vx_u8m4_m(mask, acc, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vmacc_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmacc_vv_u8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vmacc_vx_u8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmacc_vv_u16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vmacc_vx_u16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmacc_vv_u16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmacc_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vmacc_vx_u16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmacc_vv_u16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vmacc_vx_u16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmacc_vv_u16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vmacc_vx_u16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m4_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmacc_vv_u16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vmacc_vx_u16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmacc_vv_u16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vmacc_vx_u16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmacc_vv_u32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vmacc_vx_u32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmacc_vv_u32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vmacc_vx_u32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmacc_vv_u32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vmacc_vx_u32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmacc_vv_u32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vmacc_vx_u32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmacc_vv_u32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vmacc_vx_u32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmacc_vv_u64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vmacc_vx_u64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmacc_vv_u64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vmacc_vx_u64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmacc_vv_u64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { + return vmacc_vx_u64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmacc_vv_u64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmacc_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { + return vmacc_vx_u64m8_m(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmadd_vv_i8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vmadd_vx_i8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmadd_vv_i8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vmadd_vx_i8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmadd_vv_i8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vmadd_vx_i8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmadd_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmadd_vv_i8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmadd_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vmadd_vx_i8m1(acc, op1, op2, vl); 
+} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmadd_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmadd_vv_i8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmadd_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vmadd_vx_i8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmadd_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmadd_vv_i8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmadd_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vmadd_vx_i8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmadd_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmadd_vv_i8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmadd_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vmadd_vx_i8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmadd_vv_i16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vmadd_vx_i16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmadd_vv_i16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vmadd_vx_i16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmadd_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmadd_vv_i16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmadd_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vmadd_vx_i16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmadd_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmadd_vv_i16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmadd_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vmadd_vx_i16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmadd_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmadd_vv_i16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmadd_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vmadd_vx_i16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmadd_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, 
size_t vl) { + return vmadd_vv_i16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmadd_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vmadd_vx_i16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmadd_vv_i32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vmadd_vx_i32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmadd_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmadd_vv_i32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmadd_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vmadd_vx_i32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmadd_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmadd_vv_i32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmadd_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return vmadd_vx_i32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmadd_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmadd_vv_i32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmadd_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vmadd_vx_i32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmadd_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmadd_vv_i32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmadd_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vmadd_vx_i32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmadd_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmadd_vv_i64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmadd_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vmadd_vx_i64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmadd_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmadd_vv_i64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmadd_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vmadd_vx_i64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmadd_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmadd_vv_i64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint64m4_t test_vmadd_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vmadd_vx_i64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmadd_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmadd_vv_i64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmadd_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vmadd_vx_i64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmadd_vv_u8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vmadd_vx_u8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmadd_vv_u8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vmadd_vx_u8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmadd_vv_u8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vmadd_vx_u8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmadd_vv_u8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vmadd_vx_u8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmadd_vv_u8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m2( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vmadd_vx_u8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmadd_vv_u8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vmadd_vx_u8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmadd_vv_u8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vmadd_vx_u8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmadd_vv_u16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vmadd_vx_u16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmadd_vv_u16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vmadd_vx_u16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmadd_vv_u16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vmadd_vx_u16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmadd_vv_u16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vmadd_vx_u16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmadd_vv_u16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vmadd_vx_u16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmadd_vv_u16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t acc, uint16_t op1, 
vuint16m8_t op2, size_t vl) { + return vmadd_vx_u16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmadd_vv_u32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vmadd_vx_u32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmadd_vv_u32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vmadd_vx_u32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmadd_vv_u32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmadd_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vmadd_vx_u32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmadd_vv_u32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vmadd_vx_u32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmadd_vv_u32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vmadd_vx_u32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmadd_vv_u64m1(acc, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vmadd_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vmadd_vx_u64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmadd_vv_u64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vmadd_vx_u64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmadd_vv_u64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { + return vmadd_vx_u64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmadd_vv_u64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { + return vmadd_vx_u64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmadd_vv_i8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vmadd_vx_i8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmadd_vv_i8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t 
test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vmadd_vx_i8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmadd_vv_i8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vmadd_vx_i8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmadd_vv_i8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vmadd_vx_i8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return 
vmadd_vv_i8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vmadd_vx_i8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmadd_vv_i8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vmadd_vx_i8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmadd_vv_i8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vmadd_vx_i8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmadd_vv_i16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vmadd_vx_i16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmadd_vv_i16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vmadd_vx_i16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmadd_vv_i16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vmadd_vx_i16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmadd_vv_i16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vmadd_vx_i16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmadd_vv_i16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vmadd_vx_i16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmadd_vv_i16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vmadd_vx_i16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmadd_vv_i32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vmadd_vx_i32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmadd_vv_i32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i32( 
[[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vmadd_vx_i32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmadd_vv_i32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return vmadd_vx_i32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmadd_vv_i32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vmadd_vx_i32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmadd_vv_i32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vmadd_vx_i32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmadd_vv_i64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vmadd_vx_i64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmadd_vv_i64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vmadd_vx_i64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmadd_vv_i64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vmadd_vx_i64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmadd_vv_i64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vmadd_vx_i64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmadd_vv_u8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vmadd_vx_u8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmadd_vv_u8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vmadd_vx_u8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmadd_vv_u8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vmadd_vx_u8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmadd_vv_u8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vmadd_vx_u8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmadd_vv_u8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vmadd_vx_u8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmadd_vv_u8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vmadd_vx_u8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmadd_vv_u8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vmadd_vx_u8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmadd_vv_u16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t 
test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vmadd_vx_u16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmadd_vv_u16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vmadd_vx_u16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmadd_vv_u16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vmadd_vx_u16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t 
test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmadd_vv_u16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vmadd_vx_u16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmadd_vv_u16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vmadd_vx_u16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmadd_vv_u16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t 
mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vmadd_vx_u16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmadd_vv_u32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vmadd_vx_u32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmadd_vv_u32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vmadd_vx_u32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, 
vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmadd_vv_u32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vmadd_vx_u32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmadd_vv_u32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vmadd_vx_u32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmadd_vv_u32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t 
vl) { + return vmadd_vx_u32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmadd_vv_u64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vmadd_vx_u64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmadd_vv_u64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vmadd_vx_u64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmadd_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmadd_vv_u64m4_m(mask, 
acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd_vx_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd_vv_u64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd_vx_u64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
@@ -0,0 +1,2473 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return
vmax_vv_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmax_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmax_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmax_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmax_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmax_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmax_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vmax_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmax_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmax_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmax_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmax_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmax_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmax_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmax_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmax_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmax_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmax_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmax_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) 
{ + return vmax_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmax_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vmax_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmax_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vmax_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmax_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vmax_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmaxu_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmaxu_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmaxu_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmaxu_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmaxu_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmaxu_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmaxu_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmaxu_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmaxu_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmaxu_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmaxu_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmaxu_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmaxu_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmaxu_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmaxu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmaxu_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmaxu_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmaxu_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmaxu_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmaxu_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmaxu_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmaxu_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmaxu_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmaxu_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmaxu_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmaxu_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmax_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmax_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t 
test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmax_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmax_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t 
test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmax_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmax_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmax_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t 
test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vmax_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmax_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmax_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmax_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmax_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmax_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmax_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vmax_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmax_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmax_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmax_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmax_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmax_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vmax_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmax_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vmax_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmax_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vmax_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmax_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vmax_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmax_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmax_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vmax_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmaxu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmaxu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmaxu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmaxu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmaxu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmaxu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmaxu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmaxu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmaxu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmaxu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmaxu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmaxu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmaxu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16m4_m(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmaxu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmaxu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmaxu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, 
vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmaxu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmaxu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmaxu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmaxu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmaxu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmaxu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmaxu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmaxu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmaxu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmaxu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmaxu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmaxu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmaxu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vmin_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmin_vv_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmin_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8mf4(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vmin_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmin_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmin_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmin_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmin_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmin_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmin_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmin_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmin_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmin_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmin_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmin_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmin_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmin_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmin_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmin_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmin_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vmin_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmin_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vmin_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, 
vint64m4_t op2, size_t vl) { + return vmin_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vmin_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmin_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vmin_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vminu_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vminu_vv_u8mf4(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vminu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vminu_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vminu_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vminu_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vminu_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vminu_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vminu_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i32( 
[[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vminu_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vminu_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vminu_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i32( [[OP1:%.*]], i16 
[[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vminu_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vminu_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], 
i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vminu_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vminu_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vminu_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vminu_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vminu_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vminu_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vminu_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vminu_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vminu_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vminu_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vminu_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vminu_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmin_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmin_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmin_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmin_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmin_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmin_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmin_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vmin_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmin_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmin_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmin_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmin_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m2_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmin_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmin_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vmin_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vmin_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmin_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmin_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return 
vmin_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmin_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vmin_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmin_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, 
int32_t op2, size_t vl) { + return vmin_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmin_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vmin_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmin_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vmin_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t 
maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmin_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vmin_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmin_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmin_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vmin_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vminu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t 
test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vminu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vminu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vminu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vminu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vminu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vminu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vminu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vminu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vminu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vminu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vminu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vminu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vminu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vminu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vminu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vminu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vminu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vminu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vminu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vminu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vminu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vminu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vminu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vminu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vminu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vminu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vminu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + 
return vminu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]] +// +vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vminu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] +// +vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vminu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vminu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]] +// +vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vminu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c @@ -0,0 +1,6168 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vmul_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x i8>
[[TMP0]] +// +vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmul_vv_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmul_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmul_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmul_vv_i8m1(op1, op2, vl); +} 
+ +// CHECK-RV32-LABEL: @test_vmul_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmul_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmul_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmul_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i32( 
[[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmul_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmul_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmul_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmul_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmul_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmul_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m8( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmul_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmul_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmul_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmul_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmul_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vmul_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmul_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vmul_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmul_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vmul_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmul_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t 
op2, size_t vl) { + return vmul_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmul_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmul_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmul_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m1( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmul_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmul_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmul_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmul_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmul_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmul_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmul_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmul_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmul_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m8( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmul_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmul_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmul_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmul_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmul_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmul_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmul_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmul_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmul_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmul_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmul_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmul_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmul_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmulh_vv_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmulh_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, 
size_t vl) { + return vmulh_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmulh_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmulh_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmulh_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m4( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmulh_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmulh_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmulh_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmulh.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmulh_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmulh_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmulh_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i32( 
[[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmulh_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmulh_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmulh_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmulh_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmulh_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmulh_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmulh_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmulh_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmulh_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhu_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhu_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhu_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhu_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhu_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhu_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhu_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhu_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhu_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhu_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhu_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhu_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmulhu_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhu_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhu_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhu_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhu_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhu_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhu_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhu_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhu_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhsu_vv_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhsu_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhsu_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhsu_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhsu_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhsu_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhsu_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhsu_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhsu_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhsu_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhsu_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhsu_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmulhsu_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhsu_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhsu_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhsu_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhsu_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhsu_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i32( [[OP1:%.*]], i64 
[[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhsu_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhsu_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhsu_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmul_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmul_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmul_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmul_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmul_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmul_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmul_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vmul_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmul.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmul_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} 
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vmul_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vmul_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vmul_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vmul_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vmul_vv_u8mf2_m(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmul_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmul_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8m2_m(mask, maskedoff, 
op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmul_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmul_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmul_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return 
vmul_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmul_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmul_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, 
vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmul_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmul_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmul_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmul_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmul_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmul_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmul_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmul_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmul_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmul_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmul_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmul_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmul_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmul_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmul_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmul_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmul_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmul_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmul_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vmulh_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vmulh_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vmulh_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vmulh_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vmulh_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vmulh_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vmulh_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vmulh_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vmulh_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vmulh_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vmulh_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vmulh_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vmulh_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vmulh_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i16m8_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vmulh_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vmulh_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vmulh_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m1_m(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vmulh_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vmulh_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, 
vint32m8_t op2, size_t vl) { + return vmulh_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vmulh_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vmulh_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vmulh_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t 
test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vmulh_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vmulh_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulh_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vmulh_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmulhu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); 
+} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, 
vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vmulhsu_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vmulhsu_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vmulhsu_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vmulhsu_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vmulhsu_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vmulhsu_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vmulhsu_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { + return vmulhsu_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vmulhsu_vv_i16mf4_m(mask, maskedoff, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vmulhsu_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vmulhsu_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t 
test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vmulhsu_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vmulhsu_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vmulhsu_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { + return vmulhsu_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vmulhsu_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vmulhsu_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vmulhsu_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vmulhsu_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vmulhsu_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { + return vmulhsu_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vmulhsu_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vmulhsu_vv_i64m2_m(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vmulhsu_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vmulhsu_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t 
mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { + return vmulhsu_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsac_vv_i8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsac_vx_i8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vnmsac_vv_i8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vnmsac_vx_i8mf4(acc, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vnmsac_vv_i8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vnmsac_vx_i8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vnmsac_vv_i8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vnmsac_vx_i8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vnmsac_vv_i8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[ACC:%.*]], i8 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vnmsac_vx_i8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vnmsac_vv_i8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vnmsac_vx_i8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vnmsac_vv_i8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vnmsac_vx_i8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vnmsac_vv_i16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vnmsac_vx_i16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vnmsac_vv_i16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vnmsac_vx_i16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vnmsac_vv_i16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vnmsac_vx_i16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t 
test_vnmsac_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vnmsac_vv_i16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vnmsac_vx_i16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vnmsac_vv_i16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vnmsac_vx_i16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vnmsac_vv_i16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vnmsac_vx_i16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vnmsac_vv_i32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vnmsac_vx_i32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vnmsac_vv_i32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vnmsac_vx_i32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vnmsac_vv_i32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t 
op2, size_t vl) { + return vnmsac_vx_i32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vnmsac_vv_i32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vnmsac_vx_i32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vnmsac_vv_i32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vnmsac_vx_i32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vnmsac_vv_i64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vnmsac_vx_i64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vnmsac_vv_i64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vnmsac_vx_i64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vnmsac_vv_i64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vnmsac_vx_i64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vnmsac_vv_i64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vnmsac_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vnmsac_vx_i64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsac_vv_u8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsac_vx_u8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsac_vv_u8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsac_vx_u8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsac_vv_u8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsac_vx_u8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vnmsac_vv_u8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vnmsac_vx_u8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vnmsac_vv_u8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vnmsac_vx_u8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vnmsac_vv_u8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vnmsac_vx_u8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vnmsac_vv_u8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vnmsac_vx_u8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsac_vv_u16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t 
acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsac_vx_u16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsac_vv_u16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsac_vx_u16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vnmsac_vv_u16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vnmsac_vx_u16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vnmsac_vv_u16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vnmsac_vx_u16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vnmsac_vv_u16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vnmsac_vx_u16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vnmsac_vv_u16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vnmsac_vx_u16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, 
vuint32mf2_t op2, size_t vl) { + return vnmsac_vv_u32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsac_vx_u32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vnmsac_vv_u32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vnmsac_vx_u32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vnmsac_vv_u32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vnmsac_vx_u32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vnmsac_vv_u32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vnmsac_vx_u32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vnmsac_vv_u32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vnmsac_vx_u32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vnmsac_vv_u64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + 
return vnmsac_vx_u64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vnmsac_vv_u64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vnmsac_vx_u64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vnmsac_vv_u64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { + return vnmsac_vx_u64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vnmsac_vv_u64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { + return vnmsac_vx_u64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsac_vv_i8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsac_vx_i8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vnmsac_vv_i8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vnmsac_vx_i8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vnmsac_vv_i8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vnmsac_vx_i8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vnmsac_vv_i8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vnmsac_vx_i8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vnmsac_vv_i8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vnmsac_vx_i8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vnmsac_vv_i8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vnmsac_vx_i8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vnmsac_vv_i8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vnmsac_vx_i8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vnmsac_vv_i16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vnmsac_vx_i16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vnmsac_vv_i16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vnmsac_vx_i16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vnmsac_vv_i16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vnmsac_vx_i16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vnmsac_vv_i16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vnmsac_vx_i16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vnmsac_vv_i16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vnmsac_vx_i16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t 
test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vnmsac_vv_i16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vnmsac_vx_i16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vnmsac_vv_i32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vnmsac_vx_i32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vnmsac_vv_i32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t 
test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vnmsac_vx_i32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vnmsac_vv_i32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return vnmsac_vx_i32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vnmsac_vv_i32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vnmsac_vx_i32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t 
mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vnmsac_vv_i32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vnmsac_vx_i32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vnmsac_vv_i64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vnmsac_vx_i64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vnmsac_vv_i64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t 
op1, vint64m2_t op2, size_t vl) { + return vnmsac_vx_i64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vnmsac_vv_i64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vnmsac_vx_i64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vnmsac_vv_i64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vnmsac_vx_i64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) 
{ + return vnmsac_vv_u8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsac_vx_u8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsac_vv_u8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsac_vx_u8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsac_vv_u8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsac_vx_u8mf2_m(mask, acc, 
op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vnmsac_vv_u8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vnmsac_vx_u8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vnmsac_vv_u8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vnmsac_vx_u8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vnmsac_vv_u8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vnmsac_vx_u8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vnmsac_vv_u8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vnmsac_vx_u8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsac_vv_u16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsac_vx_u16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsac_vv_u16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsac_vx_u16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vnmsac_vv_u16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vnmsac_vx_u16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vnmsac_vv_u16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vnmsac_vx_u16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vnmsac_vv_u16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vnmsac_vx_u16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vnmsac_vv_u16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vnmsac_vx_u16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsac_vv_u32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsac_vx_u32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vnmsac_vv_u32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vnmsac_vx_u32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vnmsac_vv_u32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vnmsac_vx_u32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vnmsac_vv_u32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vnmsac_vx_u32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vnmsac_vv_u32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vnmsac_vx_u32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vnmsac_vv_u64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vnmsac_vx_u64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vnmsac_vv_u64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vnmsac_vx_u64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vnmsac_vv_u64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac_vx_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac_vv_u64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac_vx_u64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub_vv_i8mf8(acc, op1, op2, vl);
+}
+ +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsub_vx_i8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vnmsub_vv_i8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vnmsub_vx_i8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vnmsub_vv_i8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vnmsub_vx_i8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vnmsub_vv_i8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vnmsub_vx_i8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vnmsub_vv_i8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vnmsub_vx_i8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vnmsub_vv_i8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vnmsub_vx_i8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vnmsub_vv_i8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vnmsub_vx_i8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vnmsub_vv_i16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vnmsub_vx_i16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vnmsub_vv_i16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t acc, int16_t op1, 
vint16mf2_t op2, size_t vl) { + return vnmsub_vx_i16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vnmsub_vv_i16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vnmsub_vx_i16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vnmsub_vv_i16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vnmsub_vx_i16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vnmsub_vv_i16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vnmsub_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vnmsub_vx_i16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vnmsub_vv_i16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vnmsub_vx_i16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vnmsub_vv_i32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vnmsub_vx_i32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vnmsub_vv_i32m1(acc, 
op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vnmsub_vx_i32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vnmsub_vv_i32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return vnmsub_vx_i32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vnmsub_vv_i32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vnmsub_vx_i32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vnmsub_vv_i32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vnmsub_vx_i32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vnmsub_vv_i64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vnmsub_vx_i64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vnmsub_vv_i64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vnmsub_vx_i64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vnmsub_vv_i64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vnmsub_vx_i64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vnmsub_vv_i64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vnmsub_vx_i64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsub_vv_u8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsub_vx_u8mf8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsub_vv_u8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsub_vx_u8mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsub_vv_u8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsub_vx_u8mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vnmsub_vv_u8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], 
i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vnmsub_vx_u8m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vnmsub_vv_u8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vnmsub_vx_u8m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vnmsub_vv_u8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vnmsub_vx_u8m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return 
vnmsub_vv_u8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vnmsub_vx_u8m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsub_vv_u16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsub_vx_u16mf4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsub_vv_u16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsub_vx_u16mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vnmsub_vv_u16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vnmsub_vx_u16m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vnmsub_vv_u16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vnmsub_vx_u16m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vnmsub_vv_u16m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vnmsub_vx_u16m4(acc, op1, op2, vl); +} + 
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vnmsub_vv_u16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vnmsub_vx_u16m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsub_vv_u32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsub_vx_u32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vnmsub_vv_u32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vnmsub_vx_u32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vnmsub_vv_u32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vnmsub_vx_u32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vnmsub_vv_u32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vnmsub_vx_u32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vnmsub_vv_u32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vnmsub_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vnmsub_vx_u32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vnmsub_vv_u64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vnmsub_vx_u64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vnmsub_vv_u64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vnmsub_vx_u64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vnmsub_vv_u64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { + return vnmsub_vx_u64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vnmsub_vv_u64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { + return vnmsub_vx_u64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vnmsub_vv_i8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { + return 
vnmsub_vx_i8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vnmsub_vv_i8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { + return vnmsub_vx_i8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vnmsub_vv_i8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { + return vnmsub_vx_i8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vnmsub_vv_i8m1_m(mask, acc, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { + return vnmsub_vx_i8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vnmsub_vv_i8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { + return vnmsub_vx_i8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vnmsub_vv_i8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { + return vnmsub_vx_i8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vnmsub_vv_i8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { + return vnmsub_vx_i8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vnmsub_vv_i16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { + return vnmsub_vx_i16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vnmsub_vv_i16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { + return vnmsub_vx_i16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vnmsub_vv_i16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { + return vnmsub_vx_i16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vnmsub_vv_i16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { + return vnmsub_vx_i16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vnmsub_vv_i16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { + return vnmsub_vx_i16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vnmsub_vv_i16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { + return vnmsub_vx_i16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vnmsub_vv_i32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { + return vnmsub_vx_i32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vnmsub_vv_i32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { + return vnmsub_vx_i32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vnmsub_vv_i32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { + return vnmsub_vx_i32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vnmsub_vv_i32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { + return vnmsub_vx_i32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vnmsub_vv_i32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { + return vnmsub_vx_i32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vnmsub_vv_i64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { + return vnmsub_vx_i64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vnmsub_vv_i64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { + return vnmsub_vx_i64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vnmsub_vv_i64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { + return vnmsub_vx_i64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vnmsub_vv_i64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { + return vnmsub_vx_i64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsub_vv_u8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { + return vnmsub_vx_u8mf8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsub_vv_u8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { + return vnmsub_vx_u8mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsub_vv_u8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { + return vnmsub_vx_u8mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vnmsub_vv_u8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { + return vnmsub_vx_u8m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vnmsub_vv_u8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { + return vnmsub_vx_u8m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vnmsub_vv_u8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { + return vnmsub_vx_u8m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vnmsub_vv_u8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { + return vnmsub_vx_u8m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsub_vv_u16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { + return vnmsub_vx_u16mf4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsub_vv_u16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { + return vnmsub_vx_u16mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vnmsub_vv_u16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { + return vnmsub_vx_u16m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vnmsub_vv_u16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { + return vnmsub_vx_u16m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vnmsub_vv_u16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { + return vnmsub_vx_u16m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vnmsub_vv_u16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { + return vnmsub_vx_u16m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsub_vv_u32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { + return vnmsub_vx_u32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vnmsub_vv_u32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { + return vnmsub_vx_u32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vnmsub_vv_u32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { + return vnmsub_vx_u32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vnmsub_vv_u32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { + return vnmsub_vx_u32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vnmsub_vv_u32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { + return vnmsub_vx_u32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vnmsub_vv_u64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { + return vnmsub_vx_u64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vnmsub_vv_u64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { + return vnmsub_vx_u64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vnmsub_vv_u64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { + return vnmsub_vx_u64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vnmsub_vv_u64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { + return vnmsub_vx_u64m8_m(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vor_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vor_vv_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vor_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vor_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t 
test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vor_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vor_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vor_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vor_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vor_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vor_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vor_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vor_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vor_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vor_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vor_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vor_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vor_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vor_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vor_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vor_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vor_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vor_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vor_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vor_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vor_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vor_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vor_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vor_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vor_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vor_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vor_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vor_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vor_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return 
vor_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vor_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vor_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vor_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vor_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vor_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vor_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vor_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vor_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vor_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vor_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vor_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vor_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vor_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vor_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vor_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( 
[[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vor_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vor_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vor_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, 
uint16_t op2, size_t vl) { + return vor_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vor_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vor_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vor_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vor_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vor_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vor_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vor_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vor_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vor_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vor_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vor_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vor_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vor_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vor_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m4( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vor_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vor_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vor_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vor_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vor_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vor_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vor_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vor_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vor_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vor_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vor_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vor_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vor_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vor_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vor_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vor_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vor_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vor_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vor_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vor_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vor_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vor_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vor_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vor_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vor_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vor_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vor_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vor_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vor_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vor_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vor_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vor_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vor_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vor_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vor_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vor_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vor_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vor_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vor_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vor_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vor_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vor_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vor_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vor_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vor_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vor_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vor_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vor_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vor_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vor_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vor_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vor_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vor_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vor_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vor_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vor_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vor_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vor_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vor_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vor_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vor_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m4_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vor_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vor_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vor_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vor_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vor_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vor_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vor_vv_u32m4_m(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vor_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vor_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vor_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return 
vor_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vor_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vor_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vor_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vor_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, 
size_t vl) { + return vor_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vor_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vor_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vor_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vrem_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vrem_vv_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vrem_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], 
i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vrem_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vrem_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vrem_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vrem_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vrem_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vrem_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vrem_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vrem_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vrem_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t 
test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vrem_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vrem_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + 
return vrem_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vrem_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vrem_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vrem_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m8( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vrem_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vrem_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vrem_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vrem_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vrem_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vrem_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vrem_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vrem_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vrem_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vremu_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vremu_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vremu_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vremu_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m2( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vremu_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vremu_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vremu_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vremu_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vremu_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vremu_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vremu_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vremu_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vremu_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vremu_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vremu_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vremu_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vremu_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vremu_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vremu_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vremu_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vremu_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vremu_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vremu_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vremu_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vremu_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vrem_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vrem_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vrem_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vrem_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vrem_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vrem_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vrem_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vrem_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vrem_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vrem_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vrem_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vrem_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vrem_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vrem_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vrem_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vrem_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vrem_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vrem_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vrem_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vrem_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vrem_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vrem_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vrem_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vrem_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vrem_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vrem_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vrem_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vrem_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vrem_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrem_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vrem_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vremu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vremu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vremu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m1_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vremu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vremu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vremu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m4_m( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vremu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vremu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vremu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16mf4_m(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vremu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vremu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, 
vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vremu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vremu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vremu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vremu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vremu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vremu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vremu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vremu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vremu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vremu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vremu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vremu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vremu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vremu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vremu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vremu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vremu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
@@ -0,0 +1,1240 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 
[[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vrsub_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vrsub_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vrsub_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vrsub_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vrsub_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vrsub_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vrsub_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vrsub_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vrsub_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + 
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vrsub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16mf2_m(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vrsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, 
int32_t op2, size_t vl) { + return vrsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vrsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t 
test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vrsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vrsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vrsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vrsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vrsub_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vrsub_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vrsub_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vrsub_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vrsub_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vrsub_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vrsub_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vrsub_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return vrsub_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vsll_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsll_vv_i8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { + return vsll_vx_i8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsll_vv_i8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +//
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { + return vsll_vx_i8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsll_vv_i8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsll_vv_i8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { + return vsll_vx_i8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsll_vv_i8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsll_vx_i8m2(vint8m2_t 
op1, size_t shift, size_t vl) { + return vsll_vx_i8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsll_vv_i8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { + return vsll_vx_i8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsll_vv_i8m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { + return vsll_vx_i8m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsll_vv_i16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { + return 
vsll_vx_i16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsll_vv_i16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsll_vv_i16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { + return vsll_vx_i16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsll_vv_i16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { + return vsll_vx_i16m2(op1, shift, 
vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsll_vv_i16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { + return vsll_vx_i16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsll_vv_i16m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { + return vsll_vx_i16m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_i32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i32mf2(op1, shift, vl); +} + +// 
CHECK-RV32-LABEL: @test_vsll_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsll_vv_i32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { + return vsll_vx_i32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsll_vv_i32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { + return vsll_vx_i32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsll_vv_i32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { + return vsll_vx_i32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m8( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsll_vv_i32m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { + return vsll_vx_i32m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsll_vv_i64m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { + return vsll_vx_i64m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsll_vv_i64m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { + return vsll_vx_i64m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsll_vv_i64m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { + return vsll_vx_i64m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsll_vv_i64m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { + return vsll_vx_i64m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsll_vv_u8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { + return vsll_vx_u8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsll_vv_u8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { + return vsll_vx_u8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsll_vv_u8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsll_vv_u8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { + return vsll_vx_u8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i32( [[OP1:%.*]], 
[[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsll_vv_u8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { + return vsll_vx_u8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsll_vv_u8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { + return vsll_vx_u8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsll_vv_u8m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { + return vsll_vx_u8m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsll_vv_u16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { + return vsll_vx_u16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsll_vv_u16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsll_vv_u16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { + return vsll_vx_u16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsll_vv_u16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { + return vsll_vx_u16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsll_vv_u16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { + return vsll_vx_u16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsll_vv_u16m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { + return vsll_vx_u16m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vsll_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_u32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsll_vv_u32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { + return vsll_vx_u32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsll_vv_u32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { + return vsll_vx_u32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsll_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsll_vv_u32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { + return vsll_vx_u32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsll_vv_u32m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { + return vsll_vx_u32m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsll_vv_u64m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { + return vsll_vx_u64m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m2( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsll_vv_u64m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { + return vsll_vx_u64m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsll_vv_u64m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { + return vsll_vx_u64m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsll_vv_u64m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { + return vsll_vx_u64m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsll_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsll_vv_i8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { + return vsll_vx_i8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsll_vv_i8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { + return vsll_vx_i8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsll_vv_i8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsll_vv_i8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { + return vsll_vx_i8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsll_vv_i8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { + return vsll_vx_i8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsll_vv_i8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { + return vsll_vx_i8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsll_vv_i8m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { + return vsll_vx_i8m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsll_vv_i16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { + return vsll_vx_i16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsll_vv_i16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsll_vv_i16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { + return vsll_vx_i16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m2_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsll_vv_i16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { + return vsll_vx_i16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsll_vv_i16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { + return vsll_vx_i16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsll_vv_i16m8_m(mask, maskedoff, op1, shift, 
vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { + return vsll_vx_i16m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_i32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_i32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsll_vv_i32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, 
size_t shift, size_t vl) { + return vsll_vx_i32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsll_vv_i32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { + return vsll_vx_i32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsll_vv_i32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { + return vsll_vx_i32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t 
test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsll_vv_i32m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { + return vsll_vx_i32m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsll_vv_i64m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { + return vsll_vx_i64m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsll_vv_i64m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { + return vsll_vx_i64m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsll_vv_i64m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { + return vsll_vx_i64m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsll_vv_i64m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { + return vsll_vx_i64m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsll_vv_u8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { + return vsll_vx_u8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsll_vv_u8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { + return vsll_vx_u8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsll_vv_u8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsll_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsll_vv_u8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { + return vsll_vx_u8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsll_vv_u8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { + return vsll_vx_u8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsll_vv_u8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { + return vsll_vx_u8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsll_vv_u8m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { + return vsll_vx_u8m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsll_vv_u16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { + return vsll_vx_u16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsll_vv_u16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsll_vv_u16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { + return vsll_vx_u16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsll_vv_u16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { + return vsll_vx_u16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsll_vv_u16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { + return vsll_vx_u16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsll_vv_u16m8_m(mask, maskedoff, op1, 
shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { + return vsll_vx_u16m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsll_vv_u32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsll_vx_u32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsll_vv_u32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t 
maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { + return vsll_vx_u32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsll_vv_u32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { + return vsll_vx_u32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsll_vv_u32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { + return vsll_vx_u32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsll_vv_u32m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { + return vsll_vx_u32m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsll_vv_u64m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { + return vsll_vx_u64m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsll_vv_u64m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { + return vsll_vx_u64m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsll_vv_u64m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { + return vsll_vx_u64m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsll_vv_u64m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsll_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { + return vsll_vx_u64m8_m(mask, maskedoff, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c @@ -0,0 +1,1240 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: 
%clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vsra_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsra_vv_i8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { + return vsra_vx_i8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsra_vv_i8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { + return vsra_vx_i8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsra_vv_i8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] =
call @llvm.riscv.vsra.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsra_vv_i8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { + return vsra_vx_i8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsra_vv_i8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { + return vsra_vx_i8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsra_vv_i8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { + return vsra_vx_i8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsra_vv_i8m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { + return vsra_vx_i8m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsra_vv_i16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { + return vsra_vx_i16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsra_vv_i16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsra_vv_i16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { + return vsra_vx_i16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsra_vv_i16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { + return vsra_vx_i16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsra_vv_i16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vsra_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { + return vsra_vx_i16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsra_vv_i16m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { + return vsra_vx_i16m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_vv_i32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsra_vv_i32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsra_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { + return vsra_vx_i32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsra_vv_i32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { + return vsra_vx_i32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsra_vv_i32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { + return vsra_vx_i32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsra_vv_i32m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m8( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { + return vsra_vx_i32m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsra_vv_i64m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { + return vsra_vx_i64m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsra_vv_i64m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { + return vsra_vx_i64m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsra_vv_i64m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { + return vsra_vx_i64m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsra_vv_i64m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { + return vsra_vx_i64m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsra_vv_i8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { + return vsra_vx_i8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + 
return vsra_vv_i8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { + return vsra_vx_i8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsra_vv_i8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsra_vv_i8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, 
vint8m1_t op1, size_t shift, size_t vl) { + return vsra_vx_i8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsra_vv_i8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { + return vsra_vx_i8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsra_vv_i8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { + return vsra_vx_i8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsra_vv_i8m8_m(vbool1_t 
mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsra_vv_i8m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { + return vsra_vx_i8m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsra_vv_i16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { + return vsra_vx_i16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsra_vv_i16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsra_vv_i16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { + return vsra_vx_i16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsra_vv_i16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { + return vsra_vx_i16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsra_vv_i16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { + return vsra_vx_i16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsra_vv_i16m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { + return vsra_vx_i16m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsra_vv_i32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsra_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { + return vsra_vx_i32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsra_vv_i32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { + return vsra_vx_i32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsra_vv_i32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { + return vsra_vx_i32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsra_vv_i32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { + return vsra_vx_i32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsra_vv_i32m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { + return vsra_vx_i32m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsra_vv_i64m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsra.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { + return vsra_vx_i64m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsra_vv_i64m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { + return vsra_vx_i64m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsra_vv_i64m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { + return vsra_vx_i64m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vv_i64m8_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsra_vv_i64m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsra_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { + return vsra_vx_i64m8_m(mask, maskedoff, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c @@ -0,0 +1,1241 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsrl_vv_u8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vsrl.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsrl_vv_u8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsrl_vv_u8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsrl_vv_u8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i32( [[OP1:%.*]], 
[[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsrl_vv_u8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsrl_vv_u8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsrl_vv_u8m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsrl_vv_u16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsrl_vv_u16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsrl_vv_u16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsrl_vv_u16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsrl_vv_u16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vsrl_vv_u16m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_vv_u32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsrl_vv_u32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsrl_vv_u32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsrl_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsrl_vv_u32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsrl_vv_u32m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsrl_vv_u64m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { + return vsrl_vx_u64m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsrl_vv_u64m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u64m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsrl_vv_u64m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u64m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsrl_vv_u64m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u64m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsrl_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vsrl_vv_u8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vsrl_vv_u8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vsrl_vv_u8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vsrl_vv_u8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vsrl_vv_u8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vsrl_vv_u8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vsrl_vv_u8m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u8m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { + return vsrl_vv_u16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { + return vsrl_vv_u16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vsrl_vv_u16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16m1_m(mask, maskedoff, op1, shift, vl); +} + +// 
CHECK-RV32-LABEL: @test_vsrl_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vsrl_vv_u16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vsrl_vv_u16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, 
vuint16m8_t shift, size_t vl) { + return vsrl_vv_u16m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u16m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { + return vsrl_vv_u32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vsrl_vv_u32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vsrl_vv_u32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vsrl_vv_u32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vsrl_vv_u32m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u32m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vsrl_vv_u64m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { + return vsrl_vx_u64m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vsrl_vv_u64m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { + return vsrl_vx_u64m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vsrl_vv_u64m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { + return vsrl_vx_u64m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vsrl_vv_u64m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vsrl_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { + return vsrl_vx_u64m8_m(mask, maskedoff, op1, shift, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c @@ -0,0 +1,2472 @@ +// NOTE: Assertions have been autogenerated by 
utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vsub_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vsub_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vsub_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vsub_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vsub_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsub_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsub_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsub_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsub_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsub_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsub_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsub_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsub_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsub_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsub_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsub_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsub_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsub_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsub_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) 
{ + return vsub_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsub_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vsub_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsub_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vsub_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsub_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vsub_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m8( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsub_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vsub_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vsub_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vsub_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vsub_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vsub_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vsub_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vsub_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vsub_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vsub_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vsub_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vsub_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vsub_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vsub_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vsub_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vsub_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vsub_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vsub_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vsub_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vsub_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vsub_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vsub_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vsub_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vsub_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, 
vuint64m4_t op2, size_t vl) { + return vsub_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vsub_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vsub_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vsub_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsub_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsub_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsub_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsub_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vsub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsub_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsub_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsub_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsub_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsub_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsub_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsub_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsub_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsub_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vsub_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vsub_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8mf4_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vsub_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vsub_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m2_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vsub_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vsub_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vsub_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u8m8_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vsub_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vsub_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vsub_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16mf2_m(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vsub_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vsub_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t 
vl) { + return vsub_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vsub_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return vsub_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vsub_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t 
mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vsub_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vsub_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vsub_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vsub_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return vsub_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vsub_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return vsub_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vsub_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return vsub_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vsub_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return vsub_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsub_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vsub_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vsub_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vxor_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vxor_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t 
test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vxor_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vxor_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vxor_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vxor_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vxor_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vxor_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vxor_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vxor_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i32( 
[[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vxor_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vxor_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vxor_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vxor_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vxor_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vxor_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vxor_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vxor_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vxor_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vxor_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vxor_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vxor_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vxor_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vxor_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vxor_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vxor_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vxor_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vxor_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vxor_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, 
size_t vl) { + return vxor_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vxor_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vxor_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vxor_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m2( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vxor_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vxor_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vxor_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vxor_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vxor_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vxor_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vxor_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vxor_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vxor_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vxor_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { + return vxor_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vxor_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vxor_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vxor_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vxor_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vxor_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vxor_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vxor_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vxor_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vxor_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vxor_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vxor_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vxor_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vxor_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vxor_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vxor_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vxor_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vxor_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vxor_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vxor_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vxor_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vxor_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vxor_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vxor_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vxor_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return vxor_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vxor_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vxor_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vxor_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vxor_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vxor_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vxor_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vxor_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return vxor_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vxor_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vxor_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vxor_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vxor_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vxor_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return vxor_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vxor_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return vxor_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vxor_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return vxor_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vxor_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return vxor_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vxor_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return vxor_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vxor_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vxor_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vxor_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vxor_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vxor_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vxor_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vxor_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return vxor_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vxor_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16mf2_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vxor_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vxor_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vxor_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vxor_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vxor_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return vxor_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vxor_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vxor_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return 
vxor_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vxor_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vxor_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vxor_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vxor_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vxor_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vxor_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vxor_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vxor_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vxor_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vxor_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vxor_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vxor_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vxor_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vxor_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vxor_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vxor_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vxor_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vxor_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}