diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -200,6 +200,16 @@
 // Basic classes with automatic codegen.
 //===----------------------------------------------------------------------===//
 
+class RVVOutOp1Builtin<string suffix, string prototype, string type_range>
+    : RVVBuiltin<suffix, prototype, type_range> {
+  let IntrinsicTypes = [-1, 1];
+}
+
+class RVVOutOp0Op1Builtin<string suffix, string prototype, string type_range>
+    : RVVBuiltin<suffix, prototype, type_range> {
+  let IntrinsicTypes = [-1, 0, 1];
+}
+
 multiclass RVVBuiltinSet<string intrinsic_name, string type_range,
                          list<list<string>> suffixes_prototypes,
                          list<int> intrinsic_types> {
@@ -229,6 +239,36 @@
     : RVVBuiltinSet;
 }
 
+// For intrinsic functions with special mangled names
+multiclass RVVWidenBuiltinSet<string intrinsic_name, string type_range,
+                              list<string> vv_suffixes_prototype,
+                              list<string> vx_suffixes_prototype,
+                              bit is_out_op1_builtin = false> {
+  let IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask" in {
+    let Name = NAME # "_" # vv_suffixes_prototype[0] in {
+      defvar suffix = vv_suffixes_prototype[1];
+      defvar prototype = vv_suffixes_prototype[2];
+      if is_out_op1_builtin then {
+        def : RVVOutOp1Builtin<suffix, prototype, type_range>;
+      } else {
+        def : RVVOutOp0Op1Builtin<suffix, prototype, type_range>;
+      }
+    }
+    // Encode vx/wx in the suffix of the mangled name
+    let Name = NAME # "_" # vx_suffixes_prototype[0],
+        MangledName = NAME # "_" # vx_suffixes_prototype[0] in {
+      defvar suffix = vx_suffixes_prototype[1];
+      defvar prototype = vx_suffixes_prototype[2];
+      if is_out_op1_builtin then {
+        def : RVVOutOp1Builtin<suffix, prototype, type_range>;
+      } else {
+        def : RVVOutOp0Op1Builtin<suffix, prototype, type_range>;
+      }
+    }
+  }
+}
+
+
 defvar TypeList = ["c","s","i","l","f","d"];
 defvar EEWList = [["8", "(Log2EEW:3)"],
                   ["16", "(Log2EEW:4)"],
@@ -413,7 +453,40 @@
                        ["vx", "Uv", "UvUvUe"]]>;
 
 // 12.2. Vector Widening Integer Add/Subtract
-// TODO
+let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+// Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW
+defm vwaddu : RVVWidenBuiltinSet<"vwaddu", "csi",
+                                 ["vv", "Uw", "UwUvUv"],
+                                 ["vx", "Uw", "UwUvUe"]>;
+defm vwsubu : RVVWidenBuiltinSet<"vwsubu", "csi",
+                                 ["vv", "Uw", "UwUvUv"],
+                                 ["vx", "Uw", "UwUvUe"]>;
+// Widening signed integer add/subtract, 2*SEW = SEW +/- SEW
+defm vwadd : RVVWidenBuiltinSet<"vwadd", "csi",
+                                ["vv", "w", "wvv"],
+                                ["vx", "w", "wve"]>;
+defm vwsub : RVVWidenBuiltinSet<"vwsub", "csi",
+                                ["vv", "w", "wvv"],
+                                ["vx", "w", "wve"]>;
+// Widening unsigned integer add/subtract, 2*SEW = 2*SEW +/- SEW
+defm vwaddu : RVVWidenBuiltinSet<"vwaddu_w", "csi",
+                                 ["wv", "Uw", "UwUwUv"],
+                                 ["wx", "Uw", "UwUwUe"],
+                                 /*is_out_op1_builtin=*/true>;
+defm vwsubu : RVVWidenBuiltinSet<"vwsubu_w", "csi",
+                                 ["wv", "Uw", "UwUwUv"],
+                                 ["wx", "Uw", "UwUwUe"],
+                                 /*is_out_op1_builtin=*/true>;
+// Widening signed integer add/subtract, 2*SEW = 2*SEW +/- SEW
+defm vwadd : RVVWidenBuiltinSet<"vwadd_w", "csi",
+                                ["wv", "w", "wwv"],
+                                ["wx", "w", "wwe"],
+                                /*is_out_op1_builtin=*/true>;
+defm vwsub : RVVWidenBuiltinSet<"vwsub_w", "csi",
+                                ["wv", "w", "wwv"],
+                                ["wx", "w", "wwe"],
+                                /*is_out_op1_builtin=*/true>;
+}
 
 // 12.3. Vector Integer Extension
 // TODO
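Why only the vx/wx forms keep a MangledName suffix (a minimal sketch of the rationale, not part of the patch; the helper names widen_vx/widen_wx are illustrative, the intrinsic calls match the tests below): the vv and wv forms are always distinguishable by the vector type of their operands, so they can share the plain overloaded name `vwadd`. With a scalar second operand, however, both the 2*SEW = SEW + SEW form and the 2*SEW = 2*SEW + SEW form can take the same vector type as op1, and the usual arithmetic conversions on the scalar would make a single overload ambiguous:

#include <riscv_vector.h>

// Both helpers take a vint16mf4_t as op1; only the suffixed names
// vwadd_vx/vwadd_wx make the intent unambiguous once integer
// promotions apply to the scalar operand.
vint32mf2_t widen_vx(vint16mf4_t op1, int16_t op2, size_t vl) {
  return vwadd_vx(op1, op2, vl); // 2*SEW = SEW + SEW: result widens
}

vint16mf4_t widen_wx(vint16mf4_t op1, int8_t op2, size_t vl) {
  return vwadd_wx(op1, op2, vl); // 2*SEW = 2*SEW + SEW: op1 already wide
}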
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
new file
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
@@ -0,0 +1,3524 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vwadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
+  return vwadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
+  return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]],
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16m1( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vwadd_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, 
int8_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vwadd_wx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i32( 
[[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i64m2( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwadd(op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { + return vwadd_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { + return vwadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { + return vwadd_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, + size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, + size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) 
{ + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, + size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vwaddu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + 
return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { + return vwaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint8mf8_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, vint8mf8_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint8mf4_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, vint8mf4_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint8mf2_t op1, int8_t op2, size_t vl) 
{ + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, vint8mf2_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint8m1_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, vint8m1_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint8m2_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, vint8m2_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint8m4_t op1, int8_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, vint8m4_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, int8_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint16mf4_t op1, vint16mf4_t op2, + size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint16mf4_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, vint16mf4_t op2, + size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vwadd_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint16mf2_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, vint16mf2_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint16m1_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, vint16m1_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint16m2_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, vint16m2_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint16m4_t op1, int16_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, vint16m4_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, int16_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i64m2_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint32m1_t op1, int32_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, vint32m1_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, int32_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t 
mask, vint64m4_t maskedoff, + vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint32m2_t op1, int32_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, vint32m2_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, int32_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint32m4_t op1, int32_t op2, size_t vl) { + return vwadd_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, vint32m4_t op2, size_t vl) { + return vwadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, int32_t op2, size_t vl) { + return vwadd_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, vuint8mf8_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, vuint8mf4_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, 
op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint8mf2_t op1, vuint8mf2_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, vuint8mf2_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, vuint8m1_t op2, size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vwaddu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, vuint8m2_t op2, size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, vuint8m4_t op2, size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint16mf4_t op1, uint16_t op2, + size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, vuint16mf4_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, uint16_t op2, + size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, vuint16mf2_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint16m1_t op1, vuint16m1_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, vuint16m1_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint16m2_t op1, vuint16m2_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, vuint16m2_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + 
vuint32m4_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint16m4_t op1, vuint16m4_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, vuint16m4_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, vuint32mf2_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint32m1_t op1, vuint32m1_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint32m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, vuint32m1_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint32m2_t op1, vuint32m2_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint32m2_t op1, uint32_t op2, 
size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, vuint32m2_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint32m4_t op1, vuint32m4_t op2, + size_t vl) { + return vwaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint32m4_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+                                   vuint64m8_t op1, vuint32m4_t op2,
+                                   size_t vl) {
+  return vwaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+                                   vuint64m8_t op1, uint32_t op2, size_t vl) {
+  return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
@@ -0,0 +1,3524 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vwsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vwsub_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vwsub_wv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t 
test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], 
i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: 
ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i32( 
[[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { + return vwsub_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { + return vwsub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { + return vwsub_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf4( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, + size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t 
test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, + size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, + size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, 
uint16_t op2, size_t vl) { + return vwsubu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vwsubu.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vwsubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
+  return vwsubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vwsubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
+  return vwsubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vwsubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
+  return vwsubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
+  return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+    vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+    vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+    vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+    vint16mf4_t op1, int8_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+    vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+    vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+    vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+    vint16mf2_t op1, int8_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+    vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+    vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+    vint16m1_t op1, vint8mf2_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+    vint16m1_t op1, int8_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+    vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+    vint8m1_t op1, int8_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+    vint16m2_t op1, vint8m1_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+    vint16m2_t op1, int8_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+    vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+    vint8m2_t op1, int8_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+    vint16m4_t op1, vint8m2_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+    vint16m4_t op1, int8_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+    vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+    vint8m4_t op1, int8_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+    vint16m8_t op1, vint8m4_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+    vint16m8_t op1, int8_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+    vint16mf4_t op1, vint16mf4_t op2,
+    size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+    vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+    vint32mf2_t op1, vint16mf4_t op2,
+    size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+    vint32mf2_t op1, int16_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+    vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+    vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+    vint32m1_t op1, vint16mf2_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+    vint32m1_t op1, int16_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+    vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+    vint16m1_t op1, int16_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+    vint32m2_t op1, vint16m1_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+    vint32m2_t op1, int16_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+    vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+    vint16m2_t op1, int16_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+    vint32m4_t op1, vint16m2_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+    vint32m4_t op1, int16_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+    vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+    vint16m4_t op1, int16_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+    vint32m8_t op1, vint16m4_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+    vint32m8_t op1, int16_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+    vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+    vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+    vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+    vint64m1_t op1, int32_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+    vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+    vint32m1_t op1, int32_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+    vint64m2_t op1, vint32m1_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+    vint64m2_t op1, int32_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+    vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+    vint32m2_t op1, int32_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+    vint64m4_t op1, vint32m2_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+    vint64m4_t op1, int32_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+    vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+    vint32m4_t op1, int32_t op2, size_t vl) {
+  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+    vint64m8_t op1, vint32m4_t op2, size_t vl) {
+  return vwsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+    vint64m8_t op1, int32_t op2, size_t vl) {
+  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+    vuint8mf8_t op1, vuint8mf8_t op2,
+    size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+    vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+    vuint16mf4_t op1, vuint8mf8_t op2,
+    size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+    vuint16mf4_t op1, uint8_t op2, size_t vl) {
+  return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+    vuint8mf4_t op1, vuint8mf4_t op2,
+    size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+    vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+    vuint16mf2_t op1, vuint8mf4_t op2,
+    size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+    vuint16mf2_t op1, uint8_t op2, size_t vl) {
+  return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+    vuint8mf2_t op1, vuint8mf2_t op2,
+    size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+    vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+    vuint16m1_t op1, vuint8mf2_t op2,
+    size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+    vuint16m1_t op1, uint8_t op2, size_t vl) {
+  return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+    vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+    vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+    vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+    vuint16m2_t op1, uint8_t op2, size_t vl) {
+  return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+    vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+    vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+    vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+    vuint16m4_t op1, uint8_t op2, size_t vl) {
+  return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+    vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+    vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+    vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+    vuint16m8_t op1, uint8_t op2, size_t vl) {
+  return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+    vuint16mf4_t op1, vuint16mf4_t op2,
+    size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+    vuint16mf4_t op1, uint16_t op2,
+    size_t vl) {
+  return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+    vuint32mf2_t op1, vuint16mf4_t op2,
+    size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+    vuint32mf2_t op1, uint16_t op2,
+    size_t vl) {
+  return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+    vuint16mf2_t op1, vuint16mf2_t op2,
+    size_t vl) {
+  return vwsubu(mask, maskedoff, op1, op2, vl);
+} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, vuint16mf2_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint16m1_t op1, vuint16m1_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, vuint16m1_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint16m2_t op1, vuint16m2_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, vuint16m2_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint16m4_t op1, vuint16m4_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, vuint16m4_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m8_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, vuint32mf2_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint32m1_t op1, vuint32m1_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint32m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, vuint32m1_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint32m2_t op1, vuint32m2_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint32m2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, vuint32m2_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint32m4_t op1, vuint32m4_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint32m4_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, vuint32m4_t op2, + size_t vl) { + return vwsubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx(mask, maskedoff, op1, op2, vl); +}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c @@ -0,0 +1,3524 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1,
vint8mf8_t op2, size_t vl) { + return vwadd_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { + return vwadd_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { + return vwadd_wv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { + return vwadd_wx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwadd_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { + return vwadd_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t 
test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { + return vwadd_wv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { + return vwadd_wx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwadd_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { + return vwadd_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { + return vwadd_wv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { + return vwadd_wx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t 
test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwadd_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { + return vwadd_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { + return vwadd_wv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { + return vwadd_wx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwadd_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { + return vwadd_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t 
test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { + return vwadd_wv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { + return vwadd_wx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwadd_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { + return vwadd_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { + return vwadd_wv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { + return vwadd_wx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t 
test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vwadd_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { + return vwadd_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { + return vwadd_wv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { + return vwadd_wx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwadd_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { + return vwadd_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { + return vwadd_wv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { + return vwadd_wx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwadd_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { + return vwadd_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { + return vwadd_wv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { + return vwadd_wx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vwadd_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
+  return vwadd_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
+  return vwadd_wv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
+  return vwadd_wx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vwadd_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
+  return vwadd_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
+  return vwadd_wv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
+  return vwadd_wx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vwadd_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vwadd_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+  return vwadd_wv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
+  return vwadd_wx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vwadd_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
+  return vwadd_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
+  return vwadd_wv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
+  return vwadd_wx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vwadd_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
+  return vwadd_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
+  return vwadd_wv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
+  return vwadd_wx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vwadd_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
+  return vwadd_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
+  return vwadd_wv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
+  return vwadd_wx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
+                                   size_t vl) {
+  return vwaddu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vwaddu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
+                                   size_t vl) {
+  return vwaddu_wv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
+  return vwaddu_wx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
+                                   size_t vl) {
+  return vwaddu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vwaddu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
+                                   size_t vl) {
+  return vwaddu_wv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
+  return vwaddu_wx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vwaddu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vwaddu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
+  return vwaddu_wv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
+  return vwaddu_wx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vwaddu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vwaddu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
+  return vwaddu_wv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
+  return vwaddu_wx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vwaddu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vwaddu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
+  return vwaddu_wv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
+  return vwaddu_wx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vwaddu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vwaddu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
+  return vwaddu_wv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
+  return vwaddu_wx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
+                                   size_t vl) {
+  return vwaddu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vwaddu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
+                                   size_t vl) {
+  return vwaddu_wv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
+  return vwaddu_wx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
+                                 size_t vl) {
+  return vwaddu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vwaddu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
+  return vwaddu_wv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
+  return vwaddu_wx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vwaddu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vwaddu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
+  return vwaddu_wv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
+  return vwaddu_wx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vwaddu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vwaddu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
+  return vwaddu_wv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
+  return vwaddu_wx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vwaddu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vwaddu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
+  return vwaddu_wv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
+  return vwaddu_wx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
+                                 size_t vl) {
+  return vwaddu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vwaddu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
+  return vwaddu_wv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
+  return vwaddu_wx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vwaddu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vwaddu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
+  return vwaddu_wv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
+  return vwaddu_wx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vwaddu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vwaddu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
+  return vwaddu_wv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
+  return vwaddu_wx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vwaddu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vwaddu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
+  return vwaddu_wv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
+  return vwaddu_wx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+                                   vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vwadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+                                   vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vwadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+                                   vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
+  return vwadd_wv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+                                   vint16mf4_t op1, int8_t op2, size_t vl) {
+  return vwadd_wx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+                                   vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vwadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+                                   vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vwadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+                                   vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
+  return vwadd_wv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+                                   vint16mf2_t op1, int8_t op2, size_t vl) {
+  return vwadd_wx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+                                 vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vwadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+                                 vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vwadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+                                 vint16m1_t op1, vint8mf2_t op2, size_t vl) {
+  return vwadd_wv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+                                 vint16m1_t op1, int8_t op2, size_t vl) {
+  return vwadd_wx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+                                 vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vwadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+                                 vint8m1_t op1, int8_t op2, size_t vl) {
+  return vwadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+                                 vint16m2_t op1, vint8m1_t op2, size_t vl) {
+  return vwadd_wv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+                                 vint16m2_t op1, int8_t op2, size_t vl) {
+  return vwadd_wx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+                                 vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vwadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+                                 vint8m2_t op1, int8_t op2, size_t vl) {
+  return vwadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+                                 vint16m4_t op1, vint8m2_t op2, size_t vl) {
+  return vwadd_wv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+                                 vint16m4_t op1, int8_t op2, size_t vl) {
+  return vwadd_wx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+                                 vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vwadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+                                 vint8m4_t op1, int8_t op2, size_t vl) {
+  return vwadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+                                 vint16m8_t op1, vint8m4_t op2, size_t vl) {
+  return vwadd_wv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+                                 vint16m8_t op1, int8_t op2, size_t vl) {
+  return vwadd_wx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+                                   vint16mf4_t op1, vint16mf4_t op2,
+                                   size_t vl) {
+  return vwadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+                                   vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vwadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+                                   vint32mf2_t op1, vint16mf4_t op2,
+                                   size_t vl) {
+  return vwadd_wv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+                                   vint32mf2_t op1, int16_t op2, size_t vl) {
+  return vwadd_wx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint16mf2_t op1, int16_t op2, size_t vl) { + return vwadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, vint16mf2_t op2, size_t vl) { + return vwadd_wv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, int16_t op2, size_t vl) { + return vwadd_wx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint16m1_t op1, int16_t op2, size_t vl) { + return vwadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, vint16m1_t op2, size_t vl) { + return vwadd_wv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, int16_t op2, size_t vl) { + return vwadd_wx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint16m2_t op1, int16_t op2, size_t vl) { + return vwadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, vint16m2_t op2, size_t vl) { + return vwadd_wv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, int16_t op2, size_t vl) { + return vwadd_wx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vwadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint16m4_t op1, int16_t op2, size_t vl) { + return vwadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, vint16m4_t op2, size_t vl) { + return 
vwadd_wv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, int16_t op2, size_t vl) { + return vwadd_wx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint32mf2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwadd_wv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint32m1_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, vint32m1_t op2, size_t vl) { + return vwadd_wv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint32m2_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, vint32m2_t op2, size_t vl) { + return vwadd_wv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint32m4_t op1, int32_t op2, size_t vl) { + return vwadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, vint32m4_t op2, size_t vl) { + return vwadd_wv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwadd_wx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, int32_t op2, size_t vl) { + return vwadd_wx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vwaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, vuint8mf8_t op2, + size_t vl) { + return vwaddu_wv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vwaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, vuint8mf4_t op2, + size_t vl) { 
+ return vwaddu_wv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint8mf2_t op1, vuint8mf2_t op2, + size_t vl) { + return vwaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, vuint8mf2_t op2, + size_t vl) { + return vwaddu_wv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, vuint8m1_t op2, size_t vl) { + return vwaddu_wv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, vuint8m2_t op2, size_t vl) { + return vwaddu_wv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, vuint8m4_t op2, size_t vl) { + return vwaddu_wv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, uint8_t op2, size_t vl) { + return vwaddu_wx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vwaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint16mf4_t op1, uint16_t op2, + size_t vl) { + return vwaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vwaddu_wv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, vuint16mf4_t op2, + size_t vl) { + return vwaddu_wv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, uint16_t op2, + size_t vl) { + return vwaddu_wx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vwaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, vuint16mf2_t op2, + size_t vl) { + return vwaddu_wv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint16m1_t op1, vuint16m1_t op2, + size_t vl) { + return vwaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, vuint16m1_t op2, + size_t vl) { + return vwaddu_wv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint16m2_t op1, vuint16m2_t op2, + size_t vl) { + return vwaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, vuint16m2_t op2, + size_t vl) { + return vwaddu_wv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint16m4_t op1, vuint16m4_t op2, + size_t vl) { + return vwaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, vuint16m4_t op2, + size_t vl) { + return vwaddu_wv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, uint16_t op2, size_t vl) { + return vwaddu_wx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vwaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vwaddu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, vuint32mf2_t op2, + size_t vl) { + return vwaddu_wv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint32m1_t op1, vuint32m1_t op2, + size_t vl) { + return vwaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t 
test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint32m1_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, vuint32m1_t op2, + size_t vl) { + return vwaddu_wv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint32m2_t op1, vuint32m2_t op2, + size_t vl) { + return vwaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint32m2_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, vuint32m2_t op2, + size_t vl) { + return vwaddu_wv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint32m4_t op1, vuint32m4_t op2, + size_t vl) { + return vwaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint32m4_t op1, uint32_t op2, size_t vl) { + return vwaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, vuint32m4_t op2, + size_t vl) { + return vwaddu_wv_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, uint32_t op2, size_t vl) { + return vwaddu_wx_u64m8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c @@ -0,0 +1,3524 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vwsub_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { + return vwsub_wv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwsub_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { + return vwsub_wv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwsub_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { + return vwsub_wv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwsub_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { + return vwsub_wv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwsub_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { + return vwsub_wv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwsub_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { + return vwsub_wv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vwsub_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { + return vwsub_wv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwsub_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { + return vwsub_wv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwsub_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { + return vwsub_wv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwsub_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { + return vwsub_wv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vwsub_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { + return vwsub_wv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vwsub_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_wv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { + return vwsub_wx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwsub_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { + return vwsub_wv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { + return vwsub_wx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwsub_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { + return vwsub_wv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { + return vwsub_wx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwsub_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { + return vwsub_wv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { + return vwsub_wx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vwsubu_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, + size_t vl) { + return vwsubu_wv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vwsubu_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, + size_t vl) { + return vwsubu_wv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vwsubu_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { + return vwsubu_wv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwsubu_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { + return vwsubu_wv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m2( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwsubu_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { + return vwsubu_wv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwsubu_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vwsubu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { + return vwsubu_wv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vwsubu_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, + 
size_t vl) { + return vwsubu_wv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vwsubu_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { + return vwsubu_wv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vwsubu_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { + return vwsubu_wv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vwsubu_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { + return vwsubu_wv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vwsubu_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { + return vwsubu_wv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vwsubu_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { + return vwsubu_wv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vwsubu_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { + return vwsubu_wv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vwsubu_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { + return vwsubu_wv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vwsubu_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { + return vwsubu_wv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vwsub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t 
test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint8mf8_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, vint8mf8_t op2, size_t vl) { + return vwsub_wv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vwsub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint8mf4_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, vint8mf4_t op2, size_t vl) { + return vwsub_wv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vwsub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint8mf2_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, vint8mf2_t op2, size_t vl) { + return vwsub_wv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vwsub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint8m1_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, vint8m1_t op2, size_t vl) { + return vwsub_wv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vwsub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint8m2_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, vint8m2_t op2, size_t vl) { + return vwsub_wv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vwsub_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint8m4_t op1, int8_t op2, size_t vl) { + return vwsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, vint8m4_t op2, size_t vl) { + return vwsub_wv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, int8_t op2, size_t vl) { + return vwsub_wx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint16mf4_t op1, vint16mf4_t op2, + size_t vl) { + return vwsub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint16mf4_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32mf2_m(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, vint16mf4_t op2, + size_t vl) { + return vwsub_wv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vwsub_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint16mf2_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t 
test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, vint16mf2_t op2, size_t vl) { + return vwsub_wv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vwsub_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint16m1_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, vint16m1_t op2, size_t vl) { + return vwsub_wv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vwsub_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint16m2_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, vint16m2_t op2, size_t vl) { + return vwsub_wv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vwsub_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vwsub_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint16m4_t op1, int16_t op2, size_t vl) { + return vwsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, vint16m4_t op2, size_t vl) { + return vwsub_wv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, int16_t op2, size_t vl) { + return vwsub_wx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint32mf2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, vint32mf2_t op2, size_t vl) { + return vwsub_wv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, int32_t op2, size_t vl) { + return vwsub_wx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vwsub_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint32m1_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vwsub_wv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, vint32m1_t op2, size_t vl) { + return vwsub_wv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, int32_t op2, size_t vl) { + return vwsub_wx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vwsub_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint32m2_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, 
vint32m2_t op2, size_t vl) { + return vwsub_wv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, int32_t op2, size_t vl) { + return vwsub_wx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vwsub_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint32m4_t op1, int32_t op2, size_t vl) { + return vwsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, vint32m4_t op2, size_t vl) { + return vwsub_wv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsub_wx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, int32_t op2, size_t vl) { + return vwsub_wx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vwsubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, vuint8mf8_t op2, + size_t vl) { + return vwsubu_wv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vwsubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, vuint8mf4_t op2, + size_t vl) { + return vwsubu_wv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint8mf2_t op1, vuint8mf2_t op2, + size_t vl) { + return vwsubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, vuint8mf2_t op2, + size_t vl) { + return vwsubu_wv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vwsubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint8m1_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vwsubu_wv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, vuint8m1_t op2, size_t vl) { + return vwsubu_wv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vwsubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint8m2_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t 
test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, vuint8m2_t op2, size_t vl) { + return vwsubu_wv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vwsubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint8m4_t op1, uint8_t op2, size_t vl) { + return vwsubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, vuint8m4_t op2, size_t vl) { + return vwsubu_wv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, uint8_t op2, size_t vl) { + return vwsubu_wx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vwsubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint16mf4_t op1, uint16_t op2, + size_t vl) { + return vwsubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, vuint16mf4_t op2, + size_t vl) { + return vwsubu_wv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, uint16_t op2, + size_t vl) { + return vwsubu_wx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vwsubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, vuint16mf2_t op2, + size_t vl) { + return vwsubu_wv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint16m1_t op1, vuint16m1_t op2, + size_t vl) { + return vwsubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + 
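+// The four masked vwsubu forms for u32m2 exercised above differ only in
+// operand shape: vv/vx take two SEW-width sources (2*SEW = SEW - SEW),
+// while wv/wx take a 2*SEW-width first source (2*SEW = 2*SEW - SEW).
+// A minimal sketch chaining them (not an autogenerated check; it uses only
+// intrinsics already tested in this file):
+vuint32m2_t chain_vwsubu_u32m2(vbool16_t mask, vuint32m2_t acc,
+                               vuint16m1_t a, vuint16m1_t b, uint16_t s,
+                               size_t vl) {
+  vuint32m2_t d = vwsubu_vv_u32m2_m(mask, acc, a, b, vl); // widened a - b
+  d = vwsubu_vx_u32m2_m(mask, d, a, s, vl);               // widened a - s
+  d = vwsubu_wv_u32m2_m(mask, d, d, b, vl);               // wide d - b
+  return vwsubu_wx_u32m2_m(mask, d, d, s, vl);            // wide d - s
+}
+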
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint16m1_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, vuint16m1_t op2, + size_t vl) { + return vwsubu_wv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint16m2_t op1, vuint16m2_t op2, + size_t vl) { + return vwsubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint16m2_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, vuint16m2_t op2, + size_t vl) { + return vwsubu_wv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint16m4_t op1, vuint16m4_t op2, + size_t vl) { + return vwsubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint16m4_t op1, uint16_t op2, size_t vl) { + return vwsubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, vuint16m4_t op2, + size_t vl) { + return vwsubu_wv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, uint16_t op2, size_t vl) { + return vwsubu_wx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vwsubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, vuint32mf2_t op2, + size_t vl) { + return vwsubu_wv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint32m1_t op1, vuint32m1_t op2, + size_t vl) { + return vwsubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint32m1_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, vuint32m1_t op2, + size_t vl) { + return vwsubu_wv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vwsubu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint32m2_t op1, vuint32m2_t op2, + size_t vl) { + return vwsubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint32m2_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, vuint32m2_t op2, + size_t vl) { + return vwsubu_wv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t 
test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint32m4_t op1, vuint32m4_t op2, + size_t vl) { + return vwsubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint32m4_t op1, uint32_t op2, size_t vl) { + return vwsubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, vuint32m4_t op2, + size_t vl) { + return vwsubu_wv_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, uint32_t op2, size_t vl) { + return vwsubu_wx_u64m8_m(mask, maskedoff, op1, op2, vl); +}
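+
+// Where the _m forms fit in practice: the maskedoff operand supplies the
+// value kept in inactive lanes, so a masked widening subtract can merge
+// into a prior accumulator. A minimal sketch (not an autogenerated check);
+// the compare intrinsic vmsleu_vv_u32m4_b8 is assumed from the RVV
+// intrinsics naming convention, while vwsubu_vv_u64m8_m is tested above:
+vuint64m8_t widen_sub_nonneg(vuint64m8_t acc, vuint32m4_t a, vuint32m4_t b,
+                             size_t vl) {
+  // Active lanes are those where the difference is non-negative, i.e.
+  // b <= a (assumed intrinsic; u32m4 compares yield vbool8_t).
+  vbool8_t nonneg = vmsleu_vv_u32m4_b8(b, a, vl);
+  // Inactive lanes keep acc; active lanes get the widened difference a - b.
+  return vwsubu_vv_u64m8_m(nonneg, acc, a, b, vl);
+}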